diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..ce3c9e6f --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +*.gcno +*.gcda +*.gcov +*.so +*.o diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..a54d21c5 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,3 @@ +[*] +indent_style = tab +indent_size = 4 diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..b1e98a96 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,26 @@ + + + +### Problem description + +Explain your problem here (it's always better to provide reproduction steps) ... + + + +### Environment + + + + + + + + + + + diff --git a/.gitignore b/.gitignore index 9cf8da8f..1bc422a5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,4 @@ .deps -isolation_output results/* regression.diffs regression.out @@ -9,6 +8,9 @@ regression.out *.gcda *.gcno *.gcov +*.log pg_pathman--*.sql tags cscope* +Dockerfile +testgres diff --git a/.travis.yml b/.travis.yml index fd0e57ed..411c98aa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,33 +1,34 @@ -os: - - linux +os: linux -sudo: required -dist: trusty +dist: focal language: c -compiler: - - clang - - gcc +services: + - docker -before_install: - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get -y install -qq wget ca-certificates; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then source ./travis/dep-ubuntu-postgres.sh; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then source ./travis/dep-ubuntu-llvm.sh; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get update -qq; fi +install: + - ./mk_dockerfile.sh + - docker-compose build -env: - global: - - LLVM_VER=4.0 - matrix: - - PG_VER=10 CHECK_CODE=true - - PG_VER=10 CHECK_CODE=false - - PG_VER=9.6 CHECK_CODE=true - - PG_VER=9.6 CHECK_CODE=false - - PG_VER=9.5 CHECK_CODE=true - - PG_VER=9.5 CHECK_CODE=false +script: + - docker-compose run $(bash <(curl -s 
https://codecov.io/env)) tests -script: bash ./travis/pg-travis-test.sh +notifications: + email: + on_success: change + on_failure: always -after_success: - - bash <(curl -s https://codecov.io/bash) +env: + - PG_VERSION=16 LEVEL=hardcore + - PG_VERSION=16 + - PG_VERSION=15 LEVEL=hardcore + - PG_VERSION=15 + - PG_VERSION=14 LEVEL=hardcore + - PG_VERSION=14 + - PG_VERSION=13 LEVEL=hardcore + - PG_VERSION=13 + - PG_VERSION=12 LEVEL=hardcore + - PG_VERSION=12 + - PG_VERSION=11 LEVEL=hardcore + - PG_VERSION=11 diff --git a/Dockerfile.tmpl b/Dockerfile.tmpl new file mode 100644 index 00000000..4dd24ca5 --- /dev/null +++ b/Dockerfile.tmpl @@ -0,0 +1,40 @@ +FROM postgres:${PG_VERSION}-alpine + +# Install dependencies +RUN apk add --no-cache \ + openssl curl git patch \ + cmocka-dev \ + perl perl-ipc-run \ + python3 python3-dev py3-virtualenv \ + coreutils linux-headers \ + make musl-dev gcc bison flex \ + zlib-dev libedit-dev \ + pkgconf icu-dev clang clang15 clang-analyzer; + +# Install fresh valgrind +RUN apk add valgrind \ + --update-cache \ + --repository http://dl-3.alpinelinux.org/alpine/edge/main; + +# Environment +ENV LANG=C.UTF-8 PGDATA=/pg/data + +# Make directories +RUN mkdir -p ${PGDATA} && \ + mkdir -p /pg/testdir + +# Add data to test dir +ADD . 
/pg/testdir + +# Grant privileges +RUN chown -R postgres:postgres ${PGDATA} && \ + chown -R postgres:postgres /pg/testdir && \ + chmod a+rwx /usr/local/share/postgresql/extension && \ + find /usr/local/lib/postgresql -type d -print0 | xargs -0 chmod a+rwx + +COPY run_tests.sh /run.sh +RUN chmod 755 /run.sh + +USER postgres +WORKDIR /pg/testdir +ENTRYPOINT LEVEL=${LEVEL} /run.sh diff --git a/META.json b/META.json index d4c01616..c32d74ba 100644 --- a/META.json +++ b/META.json @@ -1,12 +1,10 @@ { "name": "pg_pathman", - "abstract": "Partitioning tool", - "description": "The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions.", - "version": "1.4.0", + "abstract": "Fast partitioning tool for PostgreSQL", + "description": "pg_pathman provides optimized partitioning mechanism and functions to manage partitions.", + "version": "1.5.12", "maintainer": [ - "Ildar Musin ", - "Dmitry Ivanov ", - "Ildus Kurbangaliev " + "Arseny Sher " ], "license": "postgresql", "resources": { @@ -19,13 +17,13 @@ "type": "git" } }, - "generated_by": "Ildar Musin", + "generated_by": "pgpro", "provides": { "pg_pathman": { - "file": "pg_pathman--1.4.sql", + "file": "pg_pathman--1.5.sql", "docfile": "README.md", - "version": "1.4.0", - "abstract": "Partitioning tool" + "version": "1.5.12", + "abstract": "Effective partitioning tool for PostgreSQL 9.5 and higher" } }, "meta-spec": { @@ -35,6 +33,14 @@ "tags": [ "partitioning", "partition", - "optimization" + "optimization", + "table", + "tables", + "custom node", + "runtime append", + "background worker", + "fdw", + "range", + "hash" ] } diff --git a/Makefile b/Makefile index 8b8fa036..f6780044 100644 --- a/Makefile +++ b/Makefile @@ -3,59 +3,98 @@ MODULE_big = pg_pathman OBJS = src/init.o src/relation_info.o src/utils.o src/partition_filter.o \ - src/runtimeappend.o src/runtime_merge_append.o src/pg_pathman.o src/rangeset.o \ + src/runtime_append.o src/runtime_merge_append.o src/pg_pathman.o 
src/rangeset.o \ src/pl_funcs.o src/pl_range_funcs.o src/pl_hash_funcs.o src/pathman_workers.o \ src/hooks.o src/nodes_common.o src/xact_handling.o src/utility_stmt_hooking.o \ src/planner_tree_modification.o src/debug_print.o src/partition_creation.o \ - src/compat/pg_compat.o src/compat/relation_tags.o src/compat/expand_rte_hook.o \ - src/compat/rowmarks_fix.o $(WIN32RES) + src/compat/pg_compat.o src/compat/rowmarks_fix.o src/partition_router.o \ + src/partition_overseer.o $(WIN32RES) +ifdef USE_PGXS override PG_CPPFLAGS += -I$(CURDIR)/src/include +else +override PG_CPPFLAGS += -I$(top_srcdir)/$(subdir)/src/include +endif EXTENSION = pg_pathman -EXTVERSION = 1.4 +EXTVERSION = 1.5 DATA_built = pg_pathman--$(EXTVERSION).sql DATA = pg_pathman--1.0--1.1.sql \ pg_pathman--1.1--1.2.sql \ pg_pathman--1.2--1.3.sql \ - pg_pathman--1.3--1.4.sql + pg_pathman--1.3--1.4.sql \ + pg_pathman--1.4--1.5.sql PGFILEDESC = "pg_pathman - partitioning tool for PostgreSQL" +ifneq (pg_pathman,$(filter pg_pathman,$(PG_TEST_SKIP))) REGRESS = pathman_array_qual \ pathman_basic \ pathman_bgw \ + pathman_cache_pranks \ pathman_calamity \ pathman_callbacks \ pathman_column_type \ pathman_cte \ pathman_domains \ + pathman_dropped_cols \ pathman_expressions \ pathman_foreign_keys \ + pathman_gaps \ pathman_inserts \ pathman_interval \ pathman_join_clause \ pathman_lateral \ + pathman_hashjoin \ pathman_mergejoin \ pathman_only \ pathman_param_upd_del \ pathman_permissions \ + pathman_rebuild_deletes \ pathman_rebuild_updates \ pathman_rowmarks \ pathman_runtime_nodes \ - pathman_update_trigger \ - pathman_utility_stmt + pathman_subpartitions \ + pathman_update_node \ + pathman_update_triggers \ + pathman_upd_del \ + pathman_utility_stmt \ + pathman_views \ + pathman_CVE-2020-14350 +endif -EXTRA_REGRESS_OPTS=--temp-config=$(top_srcdir)/$(subdir)/conf.add +ISOLATION = insert_nodes for_update rollback_on_create_partitions -EXTRA_CLEAN = pg_pathman--$(EXTVERSION).sql ./isolation_output +REGRESS_OPTS 
= --temp-config $(top_srcdir)/$(subdir)/conf.add +ISOLATION_OPTS = --temp-config $(top_srcdir)/$(subdir)/conf.add + +CMOCKA_EXTRA_CLEAN = missing_basic.o missing_list.o missing_stringinfo.o missing_bitmapset.o rangeset_tests.o rangeset_tests +EXTRA_CLEAN = $(patsubst %,tests/cmocka/%, $(CMOCKA_EXTRA_CLEAN)) ifdef USE_PGXS -PG_CONFIG = pg_config +PG_CONFIG=pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) +VNUM := $(shell $(PG_CONFIG) --version | awk '{print $$2}') + +# check for declarative syntax +# this feature will not be ported to >=12 +ifeq ($(VNUM),$(filter 10% 11%,$(VNUM))) +REGRESS += pathman_declarative +OBJS += src/declarative.o +override PG_CPPFLAGS += -DENABLE_DECLARATIVE +endif + +# We cannot run isolation test for versions 12,13 in PGXS case +# because 'pg_isolation_regress' is not copied to install +# directory, see src/test/isolation/Makefile +ifeq ($(VNUM),$(filter 12% 13%,$(VNUM))) +undefine ISOLATION +undefine ISOLATION_OPTS +endif + include $(PGXS) else subdir = contrib/pg_pathman @@ -67,20 +106,14 @@ endif $(EXTENSION)--$(EXTVERSION).sql: init.sql hash.sql range.sql cat $^ > $@ -ISOLATIONCHECKS=insert_nodes for_update rollback_on_create_partitions - -submake-isolation: - $(MAKE) -C $(top_builddir)/src/test/isolation all - -isolationcheck: | submake-isolation - $(MKDIR_P) isolation_output - $(pg_isolation_regress_check) \ - --temp-config=$(top_srcdir)/$(subdir)/conf.add \ - --outputdir=./isolation_output \ - $(ISOLATIONCHECKS) - python_tests: - $(MAKE) -C tests/python partitioning_tests + $(MAKE) -C tests/python partitioning_tests CASE=$(CASE) cmocka_tests: $(MAKE) -C tests/cmocka check + +clean_gcov: + find . 
\ + -name "*.gcda" -delete -o \ + -name "*.gcno" -delete -o \ + -name "*.gcov" -delete diff --git a/README.md b/README.md index d53ad374..1394bc6f 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,27 @@ -[![Build Status](https://travis-ci.org/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.org/postgrespro/pg_pathman) +[![Build Status](https://travis-ci.com/postgrespro/pg_pathman.svg?branch=master)](https://travis-ci.com/postgrespro/pg_pathman) [![PGXN version](https://badge.fury.io/pg/pg_pathman.svg)](https://badge.fury.io/pg/pg_pathman) [![codecov](https://codecov.io/gh/postgrespro/pg_pathman/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/pg_pathman) [![GitHub license](https://img.shields.io/badge/license-PostgreSQL-blue.svg)](https://raw.githubusercontent.com/postgrespro/pg_pathman/master/LICENSE) +### NOTE: this project is not under development anymore + +`pg_pathman` supports Postgres versions [11..15], but most probably it won't be ported to later releases. [Native partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) is pretty mature now and has almost everything implemented in `pg_pathman`'; we encourage users switching to it. We are still maintaining the project (fixing bugs in supported versions), but no new development is going to happen here. + # pg_pathman The `pg_pathman` module provides optimized partitioning mechanism and functions to manage partitions. 
The extension is compatible with: - * PostgreSQL 9.5, 9.6, 10; - * Postgres Pro Standard 9.5, 9.6; + + * PostgreSQL 12, 13; + * PostgreSQL with core-patch: 11, 14, 15; + * Postgres Pro Standard 11, 12, 13, 14, 15; * Postgres Pro Enterprise; -By the way, we have a growing Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). +Take a look at our Wiki [out there](https://github.com/postgrespro/pg_pathman/wiki). ## Overview -**Partitioning** means splitting one large table into smaller pieces. Each row in such table is moved to a single partition according to the partitioning key. PostgreSQL supports partitioning via table inheritance: each partition must be created as a child table with CHECK CONSTRAINT. For example: +**Partitioning** means splitting one large table into smaller pieces. Each row in such table is moved to a single partition according to the partitioning key. PostgreSQL <= 10 supports partitioning via table inheritance: each partition must be created as a child table with CHECK CONSTRAINT: ```plpgsql CREATE TABLE test (id SERIAL PRIMARY KEY, title TEXT); @@ -23,6 +29,16 @@ CREATE TABLE test_1 (CHECK ( id >= 100 AND id < 200 )) INHERITS (test); CREATE TABLE test_2 (CHECK ( id >= 200 AND id < 300 )) INHERITS (test); ``` +PostgreSQL 10 provides native partitioning: + +```plpgsql +CREATE TABLE test(id int4, value text) PARTITION BY RANGE(id); +CREATE TABLE test_1 PARTITION OF test FOR VALUES FROM (1) TO (10); +CREATE TABLE test_2 PARTITION OF test FOR VALUES FROM (10) TO (20); +``` + +It's not so different from the classic approach; there are implicit check constraints, and most of its limitations are still relevant. + Despite the flexibility, this approach forces the planner to perform an exhaustive search and to check constraints on each partition to determine whether it should be present in the plan or not. Large amount of partitions may result in significant planning overhead. 
The `pg_pathman` module features partition managing functions and optimized planning mechanism which utilizes knowledge of the partitions' structure. It stores partitioning configuration in the `pathman_config` table; each row contains a single entry for a partitioned table (relation name, partitioning column and its type). During the initialization stage the `pg_pathman` module caches some information about child partitions in the shared memory, which is used later for plan construction. Before a SELECT query is executed, `pg_pathman` traverses the condition tree in search of expressions like: @@ -47,45 +63,53 @@ More interesting features are yet to come. Stay tuned! * HASH and RANGE partitioning schemes; * Partitioning by expression and composite key; - * Both automatic and manual partition management; + * Both automatic and manual [partition management](#post-creation-partition-management); * Support for integer, floating point, date and other types, including domains; * Effective query planning for partitioned tables (JOINs, subselects etc); * `RuntimeAppend` & `RuntimeMergeAppend` custom plan nodes to pick partitions at runtime; - * `PartitionFilter`: an efficient drop-in replacement for INSERT triggers; + * [`PartitionFilter`](#custom-plan-nodes): an efficient drop-in replacement for INSERT triggers; + * [`PartitionRouter`](#custom-plan-nodes) and [`PartitionOverseer`](#custom-plan-nodes) for cross-partition UPDATE queries (instead of triggers); * Automatic partition creation for new INSERTed data (only for RANGE partitioning); - * Improved `COPY FROM\TO` statement that is able to insert rows directly into partitions; - * UPDATE triggers generation out of the box (will be replaced with custom nodes too); - * User-defined callbacks for partition creation event handling; - * Non-blocking concurrent table partitioning; + * Improved `COPY FROM` statement that is able to insert rows directly into partitions; + * [User-defined callbacks](#additional-parameters) for 
partition creation event handling; + * Non-blocking [concurrent table partitioning](#data-migration); * FDW support (foreign partitions); - * Various GUC toggles and configurable settings. - -## Roadmap - - * Multi-level partitioning (ver 1.5); - * Improved referential integrity + foreign keys on partitioned tables (ver 1.5); - -Take a look at [this page](https://github.com/postgrespro/pg_pathman/wiki/Roadmap); + * Various [GUC](#disabling-pg_pathman) toggles and configurable settings. + * Partial support of [`declarative partitioning`](#declarative-partitioning) (from PostgreSQL 10). ## Installation guide To install `pg_pathman`, execute this in the module's directory: + ```shell make install USE_PGXS=1 ``` + +> **Important:** Don't forget to set the `PG_CONFIG` variable (`make PG_CONFIG=...`) in case you want to test `pg_pathman` on a non-default or custom build of PostgreSQL. Read more [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). + Modify the **`shared_preload_libraries`** parameter in `postgresql.conf` as following: + ``` shared_preload_libraries = 'pg_pathman' ``` + > **Important:** `pg_pathman` may cause conflicts with some other extensions that use the same hook functions. For example, `pg_pathman` uses `ProcessUtility_hook` to handle COPY queries for partitioned tables, which means it may interfere with `pg_stat_statements` from time to time. In this case, try listing libraries in certain order: `shared_preload_libraries = 'pg_stat_statements, pg_pathman'`. It is essential to restart the PostgreSQL instance. After that, execute the following query in psql: ```plpgsql -CREATE EXTENSION pg_pathman; +CREATE SCHEMA pathman; +GRANT USAGE ON SCHEMA pathman TO PUBLIC; +CREATE EXTENSION pg_pathman WITH SCHEMA pathman; ``` Done! Now it's time to setup your partitioning schemes. 
-> **Important:** Don't forget to set the `PG_CONFIG` variable in case you want to test `pg_pathman` on a custom build of PostgreSQL. Read more [here](https://wiki.postgresql.org/wiki/Building_and_Installing_PostgreSQL_Extension_Modules). +> **Security notice**: pg_pathman is believed to be secure against +search-path-based attacks mentioned in Postgres +[documentation](https://www.postgresql.org/docs/current/sql-createextension.html). However, +if *your* calls of pathman's functions doesn't exactly match the signature, they +might be vulnerable to malicious overloading. If in doubt, install pathman to clean schema where nobody except superusers have CREATE object permission to avoid problems. + +> **Windows-specific**: pg_pathman imports several symbols (e.g. None_Receiver, InvalidObjectAddress) from PostgreSQL, which is fine by itself, but requires that those symbols are marked as `PGDLLIMPORT`. Unfortunately, some of them are not exported from vanilla PostgreSQL, which means that you have to either use Postgres Pro Standard/Enterprise (which includes all necessary patches), or patch and build your own distribution of PostgreSQL. ## How to update In order to update pg_pathman: @@ -95,17 +119,24 @@ In order to update pg_pathman: 3. Execute the following queries: ```plpgsql -/* replace X.Y with the version number, e.g. 1.3 */ -ALTER EXTENSION pg_pathman UPDATE TO "X.Y"; +/* only required for major releases, e.g. 1.4 -> 1.5 */ +ALTER EXTENSION pg_pathman UPDATE; SET pg_pathman.enable = t; ``` ## Available functions +### Module's version + +```plpgsql +pathman_version() +``` +Although it's possible to get major and minor version numbers using `\dx pg_pathman`, it doesn't show the actual [patch number](http://semver.org/). This function returns a complete version number of the loaded pg_pathman module in `MAJOR.MINOR.PATCH` format. 
+ ### Partition creation ```plpgsql -create_hash_partitions(relation REGCLASS, - expr TEXT, +create_hash_partitions(parent_relid REGCLASS, + expression TEXT, partitions_count INTEGER, partition_data BOOLEAN DEFAULT TRUE, partition_names TEXT[] DEFAULT NULL, @@ -114,21 +145,21 @@ create_hash_partitions(relation REGCLASS, Performs HASH partitioning for `relation` by partitioning expression `expr`. The `partitions_count` parameter specifies the number of partitions to create; it cannot be changed afterwards. If `partition_data` is `true` then all the data will be automatically copied from the parent table to partitions. Note that data migration may took a while to finish and the table will be locked until transaction commits. See `partition_table_concurrently()` for a lock-free way to migrate data. Partition creation callback is invoked for each partition if set beforehand (see `set_init_callback()`). ```plpgsql -create_range_partitions(relation REGCLASS, +create_range_partitions(parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER DEFAULT NULL partition_data BOOLEAN DEFAULT TRUE) -create_range_partitions(relation REGCLASS, +create_range_partitions(parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, p_interval INTERVAL, p_count INTEGER DEFAULT NULL, partition_data BOOLEAN DEFAULT TRUE) -create_range_partitions(relation REGCLASS, +create_range_partitions(parent_relid REGCLASS, expression TEXT, bounds ANYARRAY, partition_names TEXT[] DEFAULT NULL, @@ -164,57 +195,51 @@ stop_concurrent_part_task(relation REGCLASS) Stops a background worker performing a concurrent partitioning task. Note: worker will exit after it finishes relocating a current batch. ### Triggers -```plpgsql -create_hash_update_trigger(parent REGCLASS) -``` -Creates the trigger on UPDATE for HASH partitions. The UPDATE trigger isn't created by default because of the overhead. 
It's useful in cases when the partitioning expression's value might change. -```plpgsql -create_range_update_trigger(parent REGCLASS) -``` -Same as above, but for a RANGE-partitioned table. + +Triggers are no longer required nor for INSERTs, neither for cross-partition UPDATEs. However, user-supplied triggers *are supported*: + +* Each **inserted row** results in execution of `BEFORE/AFTER INSERT` trigger functions of a *corresponding partition*. +* Each **updated row** results in execution of `BEFORE/AFTER UPDATE` trigger functions of a *corresponding partition*. +* Each **moved row** (cross-partition update) results in execution of `BEFORE UPDATE` + `BEFORE/AFTER DELETE` + `BEFORE/AFTER INSERT` trigger functions of *corresponding partitions*. ### Post-creation partition management ```plpgsql replace_hash_partition(old_partition REGCLASS, new_partition REGCLASS, - lock_parent BOOL DEFAULT TRUE) + lock_parent BOOLEAN DEFAULT TRUE) ``` Replaces specified partition of HASH-partitioned table with another table. The `lock_parent` parameter will prevent any INSERT/UPDATE/ALTER TABLE queries to parent table. ```plpgsql -split_range_partition(partition REGCLASS, - split_value ANYELEMENT, - partition_name TEXT DEFAULT NULL) +split_range_partition(partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) ``` Split RANGE `partition` in two by `split_value`. Partition creation callback is invoked for a new partition if available. ```plpgsql -merge_range_partitions(partition1 REGCLASS, partition2 REGCLASS) -``` -Merge two adjacent RANGE partitions. First, data from `partition2` is copied to `partition1`, then `partition2` is removed. - -```plpgsql -merge_range_partitions(partitions REGCLASS[]) +merge_range_partitions(variadic partitions REGCLASS[]) ``` -Merge several adjacent RANGE partitions (partitions must be specified in ascending or descending order). All the data will be accumulated in the first partition. 
+Merge several adjacent RANGE partitions. Partitions are automatically ordered by increasing bounds; all the data will be accumulated in the first partition. ```plpgsql -append_range_partition(parent REGCLASS, +append_range_partition(parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) ``` Append new RANGE partition with `pathman_config.range_interval` as interval. ```plpgsql -prepend_range_partition(parent REGCLASS, +prepend_range_partition(parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) ``` Prepend new RANGE partition with `pathman_config.range_interval` as interval. ```plpgsql -add_range_partition(relation REGCLASS, +add_range_partition(parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT DEFAULT NULL, @@ -228,29 +253,48 @@ drop_range_partition(partition TEXT, delete_data BOOLEAN DEFAULT TRUE) Drop RANGE partition and all of its data if `delete_data` is true. ```plpgsql -attach_range_partition(relation REGCLASS, - partition REGCLASS, - start_value ANYELEMENT, - end_value ANYELEMENT) +attach_range_partition(parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) ``` Attach partition to the existing RANGE-partitioned relation. The attached table must have exactly the same structure as the parent table, including the dropped columns. Partition creation callback is invoked if set (see `pathman_config_params`). ```plpgsql -detach_range_partition(partition REGCLASS) +detach_range_partition(partition_relid REGCLASS) ``` Detach partition from the existing RANGE-partitioned relation. ```plpgsql -disable_pathman_for(relation TEXT) +disable_pathman_for(parent_relid REGCLASS) ``` Permanently disable `pg_pathman` partitioning mechanism for the specified parent table and remove the insert trigger if it exists. All partitions and data remain unchanged. 
```plpgsql -drop_partitions(parent REGCLASS, - delete_data BOOLEAN DEFAULT FALSE) +drop_partitions(parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) ``` Drop partitions of the `parent` table (both foreign and local relations). If `delete_data` is `false`, the data is copied to the parent table first. Default is `false`. +To remove partitioned table along with all partitions fully, use conventional +`DROP TABLE relation CASCADE`. However, care should be taken in somewhat rare +case when you are running logical replication and `DROP` was executed by +replication apply worker, e.g. via trigger on replicated table. `pg_pathman` +uses `pathman_ddl_trigger` event trigger to remove the record about dropped +table from `pathman_config`, and this trigger by default won't fire on replica, +leading to inconsistent state when `pg_pathman` thinks that the table still +exists, but in fact it doesn't. If this is the case, configure this trigger to +fire on replica too: + +```plpgsql +ALTER EVENT TRIGGER pathman_ddl_trigger ENABLE ALWAYS; +``` + +Physical replication doesn't have this problem since DDL as well as +`pathman_config` table is replicated too; master and slave PostgreSQL instances +are basically identical, and it is only harmful to keep this trigger in `ALWAYS` +mode. + ### Additional parameters @@ -263,7 +307,7 @@ Update RANGE partitioned table interval. Note that interval must not be negative ```plpgsql set_enable_parent(relation REGCLASS, value BOOLEAN) ``` -Include/exclude parent table into/from query plan. In original PostgreSQL planner parent table is always included into query plan even if it's empty which can lead to additional overhead. You can use `disable_parent()` if you are never going to use parent table as a storage. Default value depends on the `partition_data` parameter that was specified during initial partitioning in `create_range_partitions()` or `create_partitions_from_range()` functions. 
If the `partition_data` parameter was `true` then all data have already been migrated to partitions and parent table disabled. Otherwise it is enabled. +Include/exclude parent table into/from query plan. In original PostgreSQL planner parent table is always included into query plan even if it's empty which can lead to additional overhead. You can use `disable_parent()` if you are never going to use parent table as a storage. Default value depends on the `partition_data` parameter that was specified during initial partitioning in `create_range_partitions()` function. If the `partition_data` parameter was `true` then all data have already been migrated to partitions and parent table disabled. Otherwise it is enabled. ```plpgsql set_auto(relation REGCLASS, value BOOLEAN) @@ -273,7 +317,7 @@ Enable/disable auto partition propagation (only for RANGE partitioning). It is e ```plpgsql set_init_callback(relation REGCLASS, callback REGPROC DEFAULT 0) ``` -Set partition creation callback to be invoked for each attached or created partition (both HASH and RANGE). The callback must have the following signature: `part_init_callback(args JSONB) RETURNS VOID`. Parameter `arg` consists of several fields whose presence depends on partitioning type: +Set partition creation callback to be invoked for each attached or created partition (both HASH and RANGE). If callback is marked with SECURITY INVOKER, it's executed with the privileges of the user that produced a statement which has led to creation of a new partition (e.g. `INSERT INTO partitioned_table VALUES (-5)`). The callback must have the following signature: `part_init_callback(args JSONB) RETURNS VOID`. 
Parameter `arg` consists of several fields whose presence depends on partitioning type: ```json /* RANGE-partitioned table abc (child abc_4) */ { @@ -320,7 +364,7 @@ CREATE TABLE IF NOT EXISTS pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, enable_parent BOOLEAN NOT NULL DEFAULT TRUE, auto BOOLEAN NOT NULL DEFAULT TRUE, - init_callback REGPROCEDURE NOT NULL DEFAULT 0, + init_callback TEXT DEFAULT NULL, spawn_using_bgw BOOLEAN NOT NULL DEFAULT FALSE); ``` This table stores optional parameters which override standard behavior. @@ -380,6 +424,26 @@ AS SELECT * FROM @extschema@.show_cache_stats(); ``` Shows memory consumption of various caches. +## Declarative partitioning + +From PostgreSQL 10 `ATTACH PARTITION`, `DETACH PARTITION` +and `CREATE TABLE .. PARTITION OF` commands could be used with tables +partitioned by `pg_pathman`: + +```plpgsql +CREATE TABLE child1 (LIKE partitioned_table); + +--- attach new partition +ALTER TABLE partitioned_table ATTACH PARTITION child1 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); + +--- detach the partition +ALTER TABLE partitioned_table DETACH PARTITION child1; + +-- create a partition +CREATE TABLE child2 PARTITION OF partitioned_table + FOR VALUES IN ('2015-05-01', '2015-06-01'); +``` ## Custom plan nodes `pg_pathman` provides a couple of [custom plan nodes](https://wiki.postgresql.org/wiki/CustomScanAPI) which aim to reduce execution time, namely: @@ -387,6 +451,8 @@ Shows memory consumption of various caches. 
- `RuntimeAppend` (overrides `Append` plan node) - `RuntimeMergeAppend` (overrides `MergeAppend` plan node) - `PartitionFilter` (drop-in replacement for INSERT triggers) +- `PartitionOverseer` (implements cross-partition UPDATEs) +- `PartitionRouter` (implements cross-partition UPDATEs) `PartitionFilter` acts as a *proxy node* for INSERT's child scan, which means it can redirect output tuples to the corresponding partition: @@ -403,6 +469,29 @@ SELECT generate_series(1, 10), random(); (4 rows) ``` +`PartitionOverseer` and `PartitionRouter` are another *proxy nodes* used +in conjunction with `PartitionFilter` to enable cross-partition UPDATEs +(i.e. when update of partitioning key requires that we move row to another +partition). Since this node has a great deal of side effects (ordinary `UPDATE` becomes slower; +cross-partition `UPDATE` is transformed into `DELETE + INSERT`), +it is disabled by default. +To enable it, refer to the list of [GUCs](#disabling-pg_pathman) below. + +```plpgsql +EXPLAIN (COSTS OFF) +UPDATE partitioned_table +SET value = value + 1 WHERE value = 2; + QUERY PLAN +--------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on partitioned_table_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) + -> Seq Scan on partitioned_table_2 + Filter: (value = 2) +(6 rows) +``` + `RuntimeAppend` and `RuntimeMergeAppend` have much in common: they come in handy in a case when WHERE condition takes form of: ``` VARIABLE OP PARAM @@ -531,7 +620,7 @@ SELECT tableoid::regclass AS partition, * FROM partitioned_table; - All running concurrent partitioning tasks can be listed using the `pathman_concurrent_part_tasks` view: ```plpgsql SELECT * FROM pathman_concurrent_part_tasks; - userid | pid | dbid | relid | processed | status + userid | pid | dbid | relid | processed | status --------+------+-------+-------+-----------+--------- dmitry | 7367 | 16384 | test | 472000 | working (1 row) @@ -545,7 
+634,7 @@ WHERE parent = 'part_test'::regclass AND range_min::int < 500; NOTICE: 1 rows copied from part_test_11 NOTICE: 100 rows copied from part_test_1 NOTICE: 100 rows copied from part_test_2 - drop_range_partition + drop_range_partition ---------------------- dummy_test_11 dummy_test_1 @@ -553,7 +642,7 @@ NOTICE: 100 rows copied from part_test_2 (3 rows) ``` -- You can turn foreign tables into partitions using the `attach_range_partition()` function. Rows that were meant to be inserted into parent will be redirected to foreign partitions (as usual, PartitionFilter will be involved), though by default it is prohibited to insert rows into partitions provided not by `postgres_fdw`. Only superuser is allowed to set `pg_pathman.insert_into_fdw` GUC variable. +- You can turn foreign tables into partitions using the `attach_range_partition()` function. Rows that were meant to be inserted into parent will be redirected to foreign partitions (as usual, PartitionFilter will be involved), though by default it is prohibited to insert rows into partitions provided not by `postgres_fdw`. Only superuser is allowed to set `pg_pathman.insert_into_fdw` [GUC](#disabling-pg_pathman) variable. ### HASH partitioning Consider an example of HASH partitioning. 
First create a table with some integer column: @@ -683,7 +772,8 @@ There are several user-accessible [GUC](https://www.postgresql.org/docs/9.5/stat - `pg_pathman.enable` --- disable (or enable) `pg_pathman` **completely** - `pg_pathman.enable_runtimeappend` --- toggle `RuntimeAppend` custom node on\off - `pg_pathman.enable_runtimemergeappend` --- toggle `RuntimeMergeAppend` custom node on\off - - `pg_pathman.enable_partitionfilter` --- toggle `PartitionFilter` custom node on\off + - `pg_pathman.enable_partitionfilter` --- toggle `PartitionFilter` custom node on\off (for INSERTs) + - `pg_pathman.enable_partitionrouter` --- toggle `PartitionRouter` custom node on\off (for cross-partition UPDATEs) - `pg_pathman.enable_auto_partition` --- toggle automatic partition creation on\off (per session) - `pg_pathman.enable_bounds_cache` --- toggle bounds cache on\off (faster updates of partitioning scheme) - `pg_pathman.insert_into_fdw` --- allow INSERTs into various FDWs `(disabled | postgres | any_fdw)` @@ -699,8 +789,8 @@ All sections and data will remain unchanged and will be handled by the standard Do not hesitate to post your issues, questions and new ideas at the [issues](https://github.com/postgrespro/pg_pathman/issues) page. 
## Authors -Ildar Musin Postgres Professional Ltd., Russia -Alexander Korotkov Postgres Professional Ltd., Russia -Dmitry Ivanov Postgres Professional Ltd., Russia -Maksim Milyutin Postgres Professional Ltd., Russia -Ildus Kurbangaliev Postgres Professional Ltd., Russia +[Ildar Musin](https://github.com/zilder) +[Alexander Korotkov](https://github.com/akorotkov) +[Dmitry Ivanov](https://github.com/funbringer) +[Maksim Milyutin](https://github.com/maksm90) +[Ildus Kurbangaliev](https://github.com/ildus) diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..0544d859 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,3 @@ +services: + tests: + build: . diff --git a/expected/for_update.out b/expected/for_update.out index 3e41031e..ffd425e4 100644 --- a/expected/for_update.out +++ b/expected/for_update.out @@ -2,37 +2,49 @@ Parsed test spec with 2 sessions starting permutation: s1_b s1_update s2_select s1_r create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select: select * from test_tbl where id = 1; -id val +id|val +--+--- + 1| 1 +(1 row) -1 1 step s1_r: rollback; starting permutation: s1_b s1_update s2_select_locked s1_r create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select_locked: select * from test_tbl where id = 1 for share; step s1_r: rollback; step s2_select_locked: <... 
completed> -id val +id|val +--+--- + 1| 1 +(1 row) -1 1 starting permutation: s1_b s1_update s2_select_locked s1_c create_range_partitions +----------------------- + 10 +(1 row) -10 step s1_b: begin; step s1_update: update test_tbl set id = 2 where id = 1; step s2_select_locked: select * from test_tbl where id = 1 for share; step s1_c: commit; step s2_select_locked: <... completed> -id val +id|val +--+--- +(0 rows) diff --git a/expected/insert_nodes.out b/expected/insert_nodes.out index 64758aef..8f725216 100644 --- a/expected/insert_nodes.out +++ b/expected/insert_nodes.out @@ -2,122 +2,126 @@ Parsed test spec with 2 sessions starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +(2 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 
1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +(2 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) starting permutation: s1b s1_insert_150 s1r s1_show_partitions s2b s2_insert_300 s2c s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s1r: ROLLBACK; -step s1_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +(2 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +CHECK (((id >= 201) AND (id < 301))) +(3 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_300 s1r s1_show_partitions s2b s2_insert_150 s2c s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; -step s1_show_partitions: SELECT 
c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s1_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +CHECK (((id >= 201) AND (id < 301))) +(3 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) step s2b: BEGIN; step s2_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2c: COMMIT; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +CHECK (((id >= 201) AND (id < 301))) +(3 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) starting permutation: s1b s1_insert_150 s2b s2_insert_300 s1r s2r s2_show_partitions set_spawn_using_bgw +------------------- + +(1 row) - step s1b: BEGIN; step s1_insert_150: INSERT INTO range_rel SELECT generate_series(1, 150); step s2b: BEGIN; step s2_insert_300: INSERT INTO range_rel SELECT generate_series(151, 300); step s1r: ROLLBACK; step s2r: ROLLBACK; -step s2_show_partitions: SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step s2_show_partitions: SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 
'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; -consrc +pg_get_constraintdef +------------------------------------ +CHECK (((id >= 1) AND (id < 101))) +CHECK (((id >= 101) AND (id < 201))) +CHECK (((id >= 201) AND (id < 301))) +(3 rows) - -((id >= 1) AND (id < 101)) - -((id >= 101) AND (id < 201)) - -((id >= 201) AND (id < 301)) diff --git a/expected/pathman_CVE-2020-14350.out b/expected/pathman_CVE-2020-14350.out new file mode 100644 index 00000000..a48e182f --- /dev/null +++ b/expected/pathman_CVE-2020-14350.out @@ -0,0 +1,116 @@ +/* + * Check fix for CVE-2020-14350. + * See also 7eeb1d986 postgresql commit. + */ +SET client_min_messages = 'warning'; +DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); +DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE IF EXISTS test1 CASCADE; +DROP TABLE IF EXISTS test2 CASCADE; +DROP ROLE IF EXISTS pathman_regress_hacker; +SET client_min_messages = 'notice'; +GRANT CREATE ON SCHEMA public TO PUBLIC; +CREATE EXTENSION pg_pathman; +CREATE ROLE pathman_regress_hacker LOGIN; +-- Test 1 +RESET ROLE; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) +RETURNS bigint +AS $$ +BEGIN + ALTER ROLE pathman_regress_hacker SUPERUSER; + SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; +END +$$ LANGUAGE plpgsql; +CREATE TABLE test1(i INT4 NOT NULL); +INSERT INTO test1 SELECT generate_series(1, 500); +SELECT create_hash_partitions('test1', 'i', 5, false); + create_hash_partitions +------------------------ + 5 +(1 row) + +RESET ROLE; +SELECT partition_table_concurrently('test1', 10, 1); +NOTICE: worker started, you can stop it with the following command: select public.stop_concurrent_part_task('test1'); + partition_table_concurrently 
+------------------------------ + +(1 row) + +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +-- Test result (must be 'off') +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +-- Test 2 +RESET ROLE; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) +RETURNS REGCLASS +AS $$ +BEGIN + ALTER ROLE pathman_regress_hacker SUPERUSER; + RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); +END +$$ LANGUAGE plpgsql; +RESET ROLE; +CREATE TABLE test2(i INT4 NOT NULL); +INSERT INTO test2 VALUES(0); +SELECT create_range_partitions('test2', 'i', 0, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test2 values(1); +-- Test result (must be 'off') +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + is_superuser +-------------- + off +(1 row) + +-- Cleanup +RESET ROLE; +DROP FUNCTION _partition_data_concurrent(oid,integer); +DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE test1 CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table test1_0 +drop cascades to table test1_1 +drop cascades to table test1_2 +drop cascades to table test1_3 +drop cascades to table test1_4 +DROP TABLE test2 CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to sequence test2_seq +drop cascades to table test2_1 +drop cascades to table test2_2 +DROP ROLE pathman_regress_hacker; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual.out b/expected/pathman_array_qual.out index 36ec268d..0587a1c8 100644 --- a/expected/pathman_array_qual.out +++ b/expected/pathman_array_qual.out @@ -1,3 +1,7 @@ +/* + * Since 8edd0e794 (>= 12) 
Append nodes with single subplan are eliminated, + * causing different output. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -2398,6 +2402,7 @@ EXECUTE q(100); (1 row) DEALLOCATE q; -DROP SCHEMA array_qual CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual_1.out b/expected/pathman_array_qual_1.out new file mode 100644 index 00000000..dd7d2485 --- /dev/null +++ b/expected/pathman_array_qual_1.out @@ -0,0 +1,2398 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) 
+ */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +-------------------- + Seq Scan on test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: (val < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text = ANY ('{a,b}'::text[])) +(5 
rows) + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{-100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN 
(COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) - pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY 
('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) +(21 
rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) +(21 
rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ANY ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + 
-> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 + Filter: (a = ALL ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +-------------------- + Seq Scan on test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < 550) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < 700) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and 
some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN +-------------------- + Seq Scan on test_1 + Filter: (a < 99) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> 
Seq Scan on test_5 + Filter: (a < 500) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +--------------------- + Seq Scan on test_1 + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 99) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > 500) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + QUERY PLAN 
+----------------------------------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > ANY ('{NULL,700}'::integer[])) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + 
QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_2 + Filter: (a > 101) + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 550) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_7 + Filter: (a > 700) + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled 
+SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan 
on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN 
+--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> 
Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE 
q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + 
Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 
$1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN 
(COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> 
Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on 
test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + 
Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN 
+--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom 
Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq 
Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_6 + Filter: (a > 600) + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + 
-> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_9 + Filter: (a > 898) + -> Seq Scan on test_10 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 
898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); 
+EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_array_qual_2.out b/expected/pathman_array_qual_2.out new file mode 100644 index 00000000..ab504858 --- /dev/null +++ b/expected/pathman_array_qual_2.out @@ -0,0 +1,2398 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA array_qual; +CREATE TABLE array_qual.test(val TEXT NOT NULL); +CREATE SEQUENCE array_qual.test_seq; +SELECT add_to_pathman_config('array_qual.test', 'val', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('array_qual.test', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + array_qual.test_1 +(1 row) + +SELECT add_range_partition('array_qual.test', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + array_qual.test_2 +(1 row) + +SELECT add_range_partition('array_qual.test', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + array_qual.test_3 +(1 row) + +SELECT add_range_partition('array_qual.test', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + array_qual.test_4 +(1 row) + +INSERT INTO array_qual.test VALUES ('aaaa'); +INSERT INTO array_qual.test VALUES ('bbbb'); +INSERT INTO array_qual.test VALUES ('cccc'); +ANALYZE; +/* + * Test expr op ANY (...) 
+ */ +/* matching collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b']); + QUERY PLAN +------------------------- + Seq Scan on test_1 test +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'z']); + QUERY PLAN +-------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 +(5 rows) + +/* different collations */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" < ANY (array['a', 'b']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val < ANY (array['a', 'b' COLLATE "POSIX"]); + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_2 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_3 + Filter: (val < 'b'::text COLLATE "POSIX") + -> Seq Scan on test_4 + Filter: (val < 'b'::text COLLATE "POSIX") +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "C" < ANY (array['a', 'b' COLLATE "POSIX"]); +ERROR: collation mismatch between explicit collations "C" and "POSIX" at character 95 +/* different collations (pruning should work) */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val COLLATE "POSIX" = ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: ((val)::text = ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: ((val)::text = ANY 
('{a,b}'::text[])) +(5 rows) + +/* non-btree operator */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE val ~~ ANY (array['a', 'b']); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_2 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_3 + Filter: (val ~~ ANY ('{a,b}'::text[])) + -> Seq Scan on test_4 + Filter: (val ~~ ANY ('{a,b}'::text[])) +(9 rows) + +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE array_qual.test(a INT4 NOT NULL, b INT4); +SELECT create_range_partitions('array_qual.test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO array_qual.test SELECT i, i FROM generate_series(1, 1000) g(i); +ANALYZE; +/* + * Test expr IN (...) + */ +/* a IN (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{1,2,3,4}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, 100); + QUERY PLAN +----------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{-100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300); + QUERY PLAN +-------------------------- + Result + One-Time Filter: 
false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (-100, -200, -300, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a IN (NULL, NULL, NULL, NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* b IN (...) - pruning should not work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (1, 2, 3, 4); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{1,2,3,4}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (100, 200, 300, 400); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_8 
+ Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{100,200,300,400}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, 100); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,100}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,100}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY 
('{-100,-200,-300}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (-100, -200, -300, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY ('{-100,-200,-300,NULL}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE b IN (NULL, NULL, NULL, NULL); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_2 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_3 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_4 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_5 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_6 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_7 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_8 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_9 + Filter: (b = ANY ('{NULL,NULL,NULL,NULL}'::integer[])) + -> Seq Scan on test_10 + Filter: (b = ANY 
('{NULL,NULL,NULL,NULL}'::integer[])) +(21 rows) + +/* + * Test expr = ANY (...) + */ +/* a = ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ANY ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[100, 200, 300, 400]); + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{100,200,300,400}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400]]); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY 
('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ANY ('{{100,200},{300,400},{NULL,NULL}}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ANY ('{{100,200},{300,NULL}}'::integer[])) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr = ALL (...) + */ +/* a = ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a = ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a = ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 100]); + QUERY PLAN +---------------------------------------------- + Seq Scan on test_1 test + Filter: (a = ALL ('{100,100}'::integer[])) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[100, 200, 300, 400]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, 400], array[NULL, NULL]::int4[]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[array[100, 200], array[300, NULL]]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + 
+EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a = ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* + * Test expr < ANY (...) + */ +/* a < ANY (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 100]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[99, 100, 101]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + Filter: (a < 550) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < 700) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + Filter: (a < ANY ('{NULL,700}'::integer[])) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT 
* FROM array_qual.test WHERE a < ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ANY (array[NULL, 700]); + count +------- + 699 +(1 row) + +/* + * Test expr < ALL (...) + */ +/* a < ALL (...) - pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a < ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a < ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 100]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[99, 100, 101]); + QUERY PLAN +------------------------- + Seq Scan on test_1 
test + Filter: (a < 99) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[500, 550]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + Filter: (a < 500) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[100, 700]); + QUERY PLAN +------------------------- + Seq Scan on test_1 test + Filter: (a < 100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a < ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a < ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (...) + */ +/* a > ANY (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[99, 100, 101]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 99) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[500, 550]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > 500) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[100, 700]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test 
WHERE a > ANY (array[NULL, 700]); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_7 test_1 + Filter: (a > ANY ('{NULL,700}'::integer[])) + -> Seq Scan on test_8 test_2 + -> Seq Scan on test_9 test_3 + -> Seq Scan on test_10 test_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ANY (array[NULL, 700]); + count +------- + 300 +(1 row) + +/* + * Test expr > ALL (...) + */ +/* a > ALL (...) 
- pruning should work */ +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (NULL); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[]::int4[]); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_2 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_3 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_4 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_5 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_6 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_7 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_8 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_9 + Filter: (a > ALL ('{}'::integer[])) + -> Seq Scan on test_10 + Filter: (a > ALL ('{}'::integer[])) +(21 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 100]); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 100) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[99, 100, 101]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_2 test_1 + Filter: (a > 101) + -> Seq Scan on test_3 test_2 + -> Seq Scan on test_4 test_3 + -> Seq Scan on test_5 test_4 + -> Seq Scan on test_6 test_5 + -> Seq Scan on test_7 test_6 + -> Seq Scan on test_8 test_7 + -> Seq Scan on test_9 test_8 + -> Seq Scan on test_10 test_9 +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[500, 550]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on 
test_6 test_1 + Filter: (a > 550) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[100, 700]); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_7 test_1 + Filter: (a > 700) + -> Seq Scan on test_8 test_2 + -> Seq Scan on test_9 test_3 + -> Seq Scan on test_10 test_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, NULL]::int4[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT count(*) FROM array_qual.test WHERE a > ALL (array[NULL, 700]); + count +------- + 0 +(1 row) + +/* + * Test expr > ANY (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan 
(RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[$1, 100, 600])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> 
Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on test_1 + Filter: (a > 1) + -> Seq Scan on test_2 + -> Seq Scan on test_3 + -> Seq Scan on test_4 + -> Seq Scan on test_5 + -> Seq Scan on test_6 + -> Seq Scan on test_7 + -> Seq Scan on test_8 + -> Seq Scan on test_9 + -> Seq Scan on test_10 +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on 
test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[100, 600, $1])) +(22 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ANY (array[NULL, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 
+ Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on test_5 test_1 + Filter: (a > ANY ('{NULL,500}'::integer[])) + -> Seq Scan on test_6 test_2 + -> Seq Scan on test_7 test_3 + -> Seq Scan on test_8 test_4 + -> Seq Scan on test_9 test_5 + -> Seq Scan on test_10 test_6 +(8 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY 
(ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_1 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_2 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_3 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_4 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_5 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_6 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_7 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_8 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_9 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) + -> Seq Scan on test_10 test + Filter: (a > ANY (ARRAY[NULL::integer, $1])) +(22 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +/* + * Test expr > ALL (... $1 ...) 
+ */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 1000000, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, NULL, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[NULL, 
$1, NULL]); +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(500); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXECUTE q(NULL); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + 
Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 100, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, $1, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- 
+ Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on 
test_9 test + Filter: (a > ALL (ARRAY[100, $1, 600])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, $1, 600])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + 
Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[100, 600, $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[100, 600, $1])) +(12 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, NULL], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], array[1, $1]]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on 
test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + 
QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(999); + QUERY PLAN +-------------------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], ARRAY[1, $1]])) +(12 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 5 +DEALLOCATE q; +PREPARE q(int4[]) AS SELECT * FROM array_qual.test WHERE a > ALL (array[array[100, 600], $1]); +EXPLAIN (COSTS OFF) 
EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_6 test_1 + Filter: (a > 600) + -> Seq Scan on test_7 test_2 + -> Seq Scan on test_8 test_3 + -> Seq Scan on test_9 test_4 + -> Seq Scan on test_10 test_5 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 
test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 1}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +EXPLAIN (COSTS OFF) EXECUTE q('{1, 999}'); + QUERY PLAN +---------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_6 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_7 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_8 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY['{100,600}'::integer[], $1])) +(12 rows) + +/* check query plan: EXECUTE q('{1, 999}') */ +DO language plpgsql +$$ + DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(''{1, 999}'')'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q('{1, 999}'): number of partitions: 1 +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a > ALL (array[$1, 898]); +EXPLAIN (COSTS OFF) EXECUTE 
q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on test_9 test_1 + Filter: (a > 898) + -> Seq Scan on test_10 test_2 +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(900); /* check quals optimization */ + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_9 test + Filter: (a > ALL (ARRAY[$1, 898])) + -> Seq Scan on test_10 test + Filter: (a > ALL (ARRAY[$1, 898])) +(6 rows) + +EXECUTE q(1000); + a | b +---+--- +(0 rows) + +/* check query plan: EXECUTE q(999) */ +DO language plpgsql +$$ + 
DECLARE + query text; + result jsonb; + num int; + + BEGIN + query := 'EXECUTE q(999)'; + + EXECUTE format('EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, FORMAT JSON) %s', query) + INTO result; + + SELECT count(*) FROM jsonb_array_elements_text(result->0->'Plan'->'Plans') INTO num; + + RAISE notice '%: number of partitions: %', query, num; + END +$$; +NOTICE: EXECUTE q(999): number of partitions: 1 +DEALLOCATE q; +/* + * Test expr = ALL (... $1 ...) + */ +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[$1, 100, 600]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, 600, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN 
(COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +DEALLOCATE q; +PREPARE q(int4) AS SELECT * FROM array_qual.test WHERE a = ALL (array[100, $1]); +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXPLAIN (COSTS OFF) EXECUTE q(1); + QUERY PLAN +--------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (test.a = ALL (ARRAY[100, $1])) + -> Seq Scan on test_1 test + Filter: (a = ALL (ARRAY[100, $1])) +(4 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(100); + a | b +-----+----- + 100 | 100 +(1 row) + +DEALLOCATE q; +DROP TABLE array_qual.test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA array_qual; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_basic.out b/expected/pathman_basic.out index 231786bd..3afde299 100644 --- a/expected/pathman_basic.out +++ b/expected/pathman_basic.out @@ -1,3 +1,11 @@ +/* 
+ * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; @@ -18,7 +26,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM \set VERBOSITY terse ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); @@ -147,7 +155,7 @@ PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM \set VERBOSITY terse ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); @@ -441,6 +449,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; Filter: (value = 2) (3 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 
2 OR value = 1; QUERY PLAN ------------------------------ @@ -451,6 +467,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; Filter: (value = 1) (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ----------------------------------- @@ -501,6 +534,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; -> Seq Scan on range_rel_4 (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; QUERY PLAN ------------------------------- @@ -565,6 +608,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; Filter: (value = 2) (3 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (2 = value) +(3 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; QUERY PLAN ------------------------------ @@ -575,6 +626,23 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; Filter: (value = 1) (5 rows) +EXPLAIN 
(COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 = id) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; QUERY PLAN ---------------------------------------------------------------- @@ -645,6 +713,16 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; -> Seq Scan on range_rel_4 (5 rows) +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; QUERY PLAN ------------------------------- @@ -740,41 +818,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test. 
-> Index Scan using range_rel_2_dt_idx on range_rel_2 (4 rows) -/* - * Join - */ -set enable_nestloop = OFF; -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - QUERY PLAN -------------------------------------------------------------------------------------------- - Sort - Sort Key: j2.dt - -> Hash Join - Hash Cond: (j3.id = j2.id) - -> Append - -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 - -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 - -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 - -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 - -> Hash - -> Hash Join - Hash Cond: (j2.id = j1.id) - -> Append - -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 - -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 - -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 - -> Hash - -> Append - -> Index Scan using range_rel_1_pkey on range_rel_1 j1 - -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 -(20 rows) - /* * Test inlined SQL functions */ @@ -840,7 +883,7 @@ NOTICE: drop cascades to 4 other objects SELECT pathman.split_range_partition('test.num_range_rel_1', 500); split_range_partition ----------------------- - {0,1000} + test.num_range_rel_5 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; @@ -853,17 +896,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 70 Index Cond: (id <= 700) (5 rows) +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + SELECT pathman.split_range_partition('test.range_rel_1', 
'2015-01-15'::DATE); - split_range_partition -------------------------- - {01-01-2015,02-01-2015} + split_range_partition +----------------------- + test.range_rel_5 (1 row) /* Merge two partitions into one */ SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); merge_range_partitions ------------------------ - + test.num_range_rel_1 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; @@ -877,7 +928,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 70 SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); merge_range_partitions ------------------------ - + test.range_rel_1 (1 row) /* Append and prepend partitions */ @@ -1017,7 +1068,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' A (6 rows) SELECT pathman.detach_range_partition('test.range_rel_archive'); -NOTICE: trigger "range_rel_upd_trig" for relation "test.range_rel_archive" does not exist, skipping detach_range_partition ------------------------ test.range_rel_archive @@ -1138,7 +1188,7 @@ SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); split_range_partition ----------------------- - {50,70} + test."test.zero_60" (1 row) DROP TABLE test.zero CASCADE; @@ -1269,6 +1319,34 @@ NOTICE: 1000 rows copied from test.num_range_rel_3 DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 
month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, @@ -1333,16 +1411,16 @@ INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); */ ALTER TABLE test.range_rel DROP COLUMN data; SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr -----------------+------+----------+----------------+------------------------------------------------------------------------------------------------------------------------- - test.range_rel | dt | 2 | @ 10 days | {VAR :varno 1 :varattno 2 :vartype 1114 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 2 :location 8} + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days (1 row) DROP TABLE test.range_rel CASCADE; NOTICE: drop cascades to 21 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ----------+------+----------+----------------+------------- + partrel | expr | parttype | range_interval +---------+------+----------+---------------- (0 rows) /* Check overlaps */ @@ -1398,55 +1476,8 @@ SELECT * FROM test."TeSt"; 1 | 1 (3 rows) -SELECT pathman.create_update_triggers('test."TeSt"'); - create_update_triggers ------------------------- - -(1 row) - -UPDATE test."TeSt" SET a = 1; -SELECT * FROM test."TeSt"; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - -SELECT * 
FROM test."TeSt" WHERE a = 1; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - -EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; - QUERY PLAN ----------------------------- - Append - -> Seq Scan on "TeSt_2" - Filter: (a = 1) -(3 rows) - -SELECT pathman.drop_partitions('test."TeSt"'); -NOTICE: 0 rows copied from test."TeSt_0" -NOTICE: 0 rows copied from test."TeSt_1" -NOTICE: 3 rows copied from test."TeSt_2" - drop_partitions ------------------ - 3 -(1 row) - -SELECT * FROM test."TeSt"; - a | b ----+--- - 1 | 3 - 1 | 2 - 1 | 1 -(3 rows) - DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects CREATE TABLE test."RangeRel" ( id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, @@ -1474,21 +1505,21 @@ SELECT pathman.prepend_range_partition('test."RangeRel"'); SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); merge_range_partitions ------------------------ - + test."RangeRel_1" (1 row) SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); - split_range_partition -------------------------- - {12-31-2014,01-02-2015} + split_range_partition +----------------------- + test."RangeRel_6" (1 row) DROP TABLE test."RangeRel" CASCADE; NOTICE: drop cascades to 6 other objects SELECT * FROM pathman.pathman_config; - partrel | expr | parttype | range_interval | cooked_expr ---------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- - test.num_range_rel | id | 2 | 1000 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} + partrel | expr | parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 (1 row) CREATE TABLE test."RangeRel" ( @@ -1544,13 +1575,13 @@ SELECT create_range_partitions('test.range_rel', 'dt', 
'2010-01-01'::date, '1 mo SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); merge_range_partitions ------------------------ - + test.range_rel_1 (1 row) SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); - split_range_partition -------------------------- - {01-01-2010,03-01-2010} + split_range_partition +----------------------- + test.range_rel_13 (1 row) SELECT append_range_partition('test.range_rel'); @@ -1583,72 +1614,13 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; -> Seq Scan on range_rel_14 (4 rows) -/* Temporary table for JOINs */ -CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); -INSERT INTO test.tmp VALUES (1, 1), (2, 2); -/* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; - QUERY PLAN --------------------------------------------------------------------------------- - Update on range_rel_6 - -> Seq Scan on range_rel_6 - Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) -(3 rows) - -UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; -SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; - id | dt | value ------+--------------------------+------- - 166 | Tue Jun 15 00:00:00 2010 | 111 -(1 row) - -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; - QUERY PLAN --------------------------------------------------------------------------------- - Delete on range_rel_6 - -> Seq Scan on range_rel_6 - Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) -(3 rows) - -DELETE FROM test.range_rel WHERE dt = '2010-06-15'; -SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; - id | dt | value -----+----+------- -(0 rows) - -EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- 
- Update on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) -(7 rows) - -UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - QUERY PLAN --------------------------------------------------------------------------------------------- - Delete on range_rel_1 r - -> Hash Join - Hash Cond: (t.id = r.id) - -> Seq Scan on tmp t - -> Hash - -> Index Scan using range_rel_1_pkey on range_rel_1 r - Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) -(7 rows) - -DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; /* Create range partitions from whole range */ SELECT drop_partitions('test.range_rel'); -NOTICE: 44 rows copied from test.range_rel_1 +NOTICE: 45 rows copied from test.range_rel_1 NOTICE: 31 rows copied from test.range_rel_3 NOTICE: 30 rows copied from test.range_rel_4 NOTICE: 31 rows copied from test.range_rel_5 -NOTICE: 29 rows copied from test.range_rel_6 +NOTICE: 30 rows copied from test.range_rel_6 NOTICE: 31 rows copied from test.range_rel_7 NOTICE: 31 rows copied from test.range_rel_8 NOTICE: 30 rows copied from test.range_rel_9 @@ -1840,7 +1812,40 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; NOTICE: drop cascades to 2 other objects -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 29 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.mixinh_child1 VALUES 
(1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_basic_1.out b/expected/pathman_basic_1.out new file mode 100644 index 00000000..92a86727 --- /dev/null +++ b/expected/pathman_basic_1.out @@ -0,0 +1,1834 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER); +INSERT INTO test.hash_rel VALUES (1, 1); +INSERT INTO test.hash_rel VALUES (2, 2); +INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- +(0 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', true); + set_enable_parent +------------------- + +(1 row) + 
+EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 0 rows copied from test.hash_rel_0 +NOTICE: 0 rows copied from test.hash_rel_1 +NOTICE: 0 rows copied from test.hash_rel_2 + drop_partitions +----------------- + 3 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.hash_rel VALUES (4, 4); +INSERT INTO test.hash_rel VALUES (5, 5); +INSERT INTO test.hash_rel VALUES (6, 6); +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 6 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column "dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM 
+\set VERBOSITY terse +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.range_rel; + count +------- + 120 +(1 row) + +SELECT COUNT(*) FROM ONLY test.range_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 3000 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + 
+SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + +(1 row) + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------- + Seq Scan on improved_dummy_11 + Filter: (id = 101) +(2 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 12 other objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT NULL); +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER 
BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test pathman_rel_pathlist_hook() with 
INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... */ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 +(1 row) + +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 6 other objects +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan 
on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_3 + Filter: (2500 = id) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN 
+----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + Filter: (id >= 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_1 + Filter: (id >= 500) + -> Seq Scan on num_range_rel_2 + Filter: (id < 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN 
+--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_1 + Filter: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_2 + Filter: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id = 2500) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN 
+---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_2 + -> Seq Scan on num_range_rel_3 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id >= 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 500) + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id < 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 +(5 rows) + 
+EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id <= 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + -> Seq Scan on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +------------------------- + Seq Scan on range_rel_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN 
+------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt DESC; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan Backward using range_rel_4_dt_idx on range_rel_4 + -> Index Scan Backward using range_rel_3_dt_idx on range_rel_3 + -> Index Scan Backward using range_rel_2_dt_idx on range_rel_2 + -> Index Scan Backward using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +/* + * Sorting + */ +SET enable_indexscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> 
Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Merge Append + Sort Key: range_rel_1.dt + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(4 rows) + +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +-------------------------------- + Limit + -> Seq Scan on sql_inline_0 + Filter: (id = 5) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + QUERY PLAN +-------------------------------- + Limit + -> Seq Scan on sql_inline_2 + Filter: (id = 1) +(3 rows) + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- 
+(0 rows) + +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 +(1 row) + +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects +/* + * Test split and merge + */ +/* Split first partition in half */ +SELECT pathman.split_range_partition('test.num_range_rel_1', 500); + split_range_partition +----------------------- + test.num_range_rel_5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 100) + -> Index Scan using num_range_rel_5_pkey on num_range_rel_5 + Index Cond: (id <= 700) +(5 rows) + +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + +SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); + split_range_partition +----------------------- + test.range_rel_5 +(1 row) + +/* Merge two partitions into one */ +SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); + merge_range_partitions +------------------------ + test.num_range_rel_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +---------------------------------------------------------- + Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: ((id >= 100) AND (id <= 700)) +(2 rows) + +SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +/* Append and prepend partitions */ +SELECT pathman.append_range_partition('test.num_range_rel'); + append_range_partition 
+------------------------ + test.num_range_rel_6 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.num_range_rel'); + prepend_range_partition +------------------------- + test.num_range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; + QUERY PLAN +----------------------------- + Seq Scan on num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition('test.num_range_rel_7'); + drop_range_partition +---------------------- + test.num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + +SELECT 
pathman.append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_7_dt_idx on range_rel_7 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +SELECT pathman.drop_range_partition('test.range_rel_7'); + drop_range_partition +---------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + add_range_partition +--------------------- + test.range_rel_8 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_8_dt_idx on range_rel_8 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 
'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +ERROR: specified range [01-01-2014, 01-01-2015) overlaps with existing partitions +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); + attach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive + Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp without time zone) + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +SELECT pathman.detach_range_partition('test.range_rel_archive'); + detach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +CREATE TABLE test.range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: partition must have a compatible tuple format +CREATE TABLE test.range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT 
pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: column "dt" in child table must be marked NOT NULL +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); + add_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + 
test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity + -> Seq Scan on range_rel_8 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on range_rel_6 + -> Seq Scan on range_rel_plus_infinity +(3 rows) + +/* + * Zero partitions count and adding partitions with specified name + */ +CREATE TABLE test.zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); +ERROR: relation "zero" has no partitions +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); +ERROR: relation "zero" has no partitions +SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); + add_range_partition +--------------------- + test.zero_50 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); + append_range_partition +------------------------ + test.zero_appended +(1 row) + +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); + prepend_range_partition +------------------------- + test.zero_prepended +(1 row) + +SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); + 
split_range_partition +----------------------- + test."test.zero_60" +(1 row) + +DROP TABLE test.zero CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Check that altering table columns doesn't break trigger + */ +ALTER TABLE test.hash_rel ADD COLUMN abc int; +INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); +SELECT * FROM test.hash_rel WHERE id = 123; + id | value | abc +-----+-------+----- + 123 | 456 | 789 +(1 row) + +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers +----------------------+---------------------------------------------------------------------------------------+---------- + test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE 
INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we are replacing partition must have exact same structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: column "value" in child table must be marked NOT NULL +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hash_rel_extern + -> Seq Scan on hash_rel_1 + -> Seq Scan on hash_rel_2 +(4 rows) + +/* + * Clean up + */ +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 3 rows copied from test.hash_rel_1 +NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from test.hash_rel_extern + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 7 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT pathman.drop_partitions('test.hash_rel', TRUE); + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +DROP TABLE test.hash_rel CASCADE; +SELECT pathman.drop_partitions('test.num_range_rel'); +NOTICE: 999 rows copied from test.num_range_rel_1 +NOTICE: 1000 rows copied from test.num_range_rel_2 +NOTICE: 1000 rows copied from test.num_range_rel_3 + drop_partitions +----------------- + 3 +(1 row) + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE 
NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test automatic partition creation */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.range_rel (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); +INSERT INTO test.range_rel (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_14 + Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_8 + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time 
zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | +(1 row) + +SELECT pathman.set_auto('test.range_rel', false); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +ERROR: no suitable partition for key 'Mon Jun 01 00:00:00 2015' +SELECT pathman.set_auto('test.range_rel', true); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 21 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +---------+------+----------+---------------- +(0 rows) + +/* Check overlaps */ +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +ERROR: specified range [4001, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +ERROR: specified range [4000, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +ERROR: specified range [3999, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +ERROR: specified range [3000, 3500) overlaps with existing partitions +SELECT 
pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); +ERROR: specified range [0, 1001) overlaps with existing partitions +/* CaMeL cAsE table names and attributes */ +CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); +SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); +ERROR: relation "test.test" does not exist at character 39 +SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO test."TeSt" VALUES (1, 1); +INSERT INTO test."TeSt" VALUES (2, 2); +INSERT INTO test."TeSt" VALUES (3, 3); +SELECT * FROM test."TeSt"; + a | b +---+--- + 3 | 3 + 2 | 2 + 1 | 1 +(3 rows) + +DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test."RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT pathman.append_range_partition('test."RangeRel"'); + append_range_partition +------------------------ + test."RangeRel_4" +(1 row) + +SELECT pathman.prepend_range_partition('test."RangeRel"'); + prepend_range_partition +------------------------- + test."RangeRel_5" +(1 row) + +SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); + merge_range_partitions +------------------------ + test."RangeRel_1" +(1 row) + +SELECT pathman.split_range_partition('test."RangeRel_1"', 
'2015-01-01'::DATE); + split_range_partition +----------------------- + test."RangeRel_6" +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 6 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 +(1 row) + +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; +/* Test that everything works fine without schemas */ +CREATE EXTENSION pg_pathman; +/* Hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; + QUERY PLAN +------------------------------------------------------ + Append + -> Index Scan using hash_rel_0_pkey on hash_rel_0 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_1_pkey on hash_rel_1 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_2_pkey on hash_rel_2 + Index Cond: (id = 1234) +(7 rows) + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); + 
merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); + split_range_partition +----------------------- + test.range_rel_13 +(1 row) + +SELECT append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_14 +(1 row) + +SELECT prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_15 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on range_rel_15 + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_13 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_12 + Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on range_rel_14 +(4 rows) + +/* Create range partitions from whole range */ +SELECT drop_partitions('test.range_rel'); +NOTICE: 45 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 30 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from test.range_rel_13 +NOTICE: 0 rows copied from test.range_rel_14 +NOTICE: 0 rows copied from test.range_rel_15 + drop_partitions +----------------- + 14 +(1 row) + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions 
+------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq 
Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(7 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); + add_range_partition +--------------------------- + test.index_on_childs_1_1k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); + append_range_partition +---------------------------- + test.index_on_childs_1k_2k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); + append_range_partition +---------------------------- + test.index_on_childs_2k_3k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); + append_range_partition +---------------------------- + test.index_on_childs_3k_4k +(1 row) + +SELECT 
append_range_partition('test.index_on_childs', 'test.index_on_childs_4k_5k'); + append_range_partition +---------------------------- + test.index_on_childs_4k_5k +(1 row) + +SELECT set_enable_parent('test.index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------ + Append + -> Index Scan using index_on_childs_c2_idx on index_on_childs + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k + Index Cond: (c2 = 500) + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) + +DROP TABLE test.provided_part_names CASCADE; +NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO 
test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_basic_2.out b/expected/pathman_basic_2.out new file mode 100644 index 00000000..ec180fdb --- /dev/null +++ b/expected/pathman_basic_2.out @@ -0,0 +1,1834 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER); +INSERT INTO test.hash_rel VALUES (1, 1); +INSERT INTO test.hash_rel VALUES (2, 2); +INSERT INTO test.hash_rel VALUES (3, 3); +\set VERBOSITY default +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); +ERROR: failed to analyze partitioning expression "value" +DETAIL: column "value" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function pathman.create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.hash_rel ALTER COLUMN value SET NOT NULL; +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3, partition_data:=false); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel hash_rel_1 + -> Seq Scan on hash_rel_0 hash_rel_2 + -> Seq Scan on hash_rel_1 hash_rel_3 + -> Seq Scan on hash_rel_2 hash_rel_4 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.set_enable_parent('test.hash_rel', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- +(0 rows) + +SELECT 
pathman.set_enable_parent('test.hash_rel', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel hash_rel_1 + -> Seq Scan on hash_rel_0 hash_rel_2 + -> Seq Scan on hash_rel_1 hash_rel_3 + -> Seq Scan on hash_rel_2 hash_rel_4 +(5 rows) + +SELECT * FROM test.hash_rel; + id | value +----+------- + 1 | 1 + 2 | 2 + 3 | 3 +(3 rows) + +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 0 rows copied from test.hash_rel_0 +NOTICE: 0 rows copied from test.hash_rel_1 +NOTICE: 0 rows copied from test.hash_rel_2 + drop_partitions +----------------- + 3 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'Value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.hash_rel VALUES (4, 4); +INSERT INTO test.hash_rel VALUES (5, 5); +INSERT INTO test.hash_rel VALUES (6, 6); +SELECT COUNT(*) FROM test.hash_rel; + count +------- + 6 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +\set VERBOSITY default +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL); +ERROR: failed to analyze partitioning expression "dt" +DETAIL: column "dt" should be marked NOT NULL +CONTEXT: SQL statement "SELECT pathman.validate_expression(parent_relid, expression)" +PL/pgSQL function pathman.prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT pathman.prepare_for_partitioning(parent_relid, 
+ expression, + partition_data)" +PL/pgSQL function pathman.create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +\set VERBOSITY terse +ALTER TABLE test.range_rel ALTER COLUMN dt SET NOT NULL; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '1 month'::INTERVAL, 2); +ERROR: not enough partitions to fit all values of "dt" +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.range_rel; + count +------- + 120 +(1 row) + +SELECT COUNT(*) FROM ONLY test.range_rel; + count +------- + 0 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT COUNT(*) FROM test.num_range_rel; + count +------- + 3000 +(1 row) + +SELECT COUNT(*) FROM ONLY test.num_range_rel; + count +------- + 0 +(1 row) + +/* since rel_1_2_beta: check append_child_relation(), make_ands_explicit(), dummy path */ +CREATE TABLE test.improved_dummy (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO test.improved_dummy (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.improved_dummy', 'id', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test.improved_dummy (name) VALUES ('test'); /* spawns new partition */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +------------------------------------------------------ + 
Append + -> Seq Scan on improved_dummy_1 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 improved_dummy_2 + Filter: (id = 101) +(5 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy improved_dummy_1 + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_1 improved_dummy_2 + Filter: ((id = 5) AND (name = 'ib'::text)) + -> Seq Scan on improved_dummy_11 improved_dummy_3 + Filter: (id = 101) +(7 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', false); /* disable parent */ + set_enable_parent +------------------- + +(1 row) + +ALTER TABLE test.improved_dummy_1 ADD CHECK (name != 'ib'); /* make test.improved_dummy_1 disappear */ +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +---------------------------------------------- + Seq Scan on improved_dummy_11 improved_dummy + Filter: (id = 101) +(2 rows) + +SELECT pathman.set_enable_parent('test.improved_dummy', true); /* enable parent */ + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.improved_dummy WHERE id = 101 OR id = 5 AND name = 'ib'; + QUERY PLAN +-------------------------------------------------------------------- + Append + -> Seq Scan on improved_dummy improved_dummy_1 + Filter: ((id = 101) OR ((id = 5) AND (name = 'ib'::text))) + -> Seq Scan on improved_dummy_11 improved_dummy_2 + Filter: (id = 101) +(5 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 12 other objects +/* since rel_1_4_beta: check create_range_partitions(bounds array) */ +CREATE TABLE test.improved_dummy (val INT NOT 
NULL); +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2)); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + test.improved_dummy | test.improved_dummy_1 | 2 | val | 1 | 2 + test.improved_dummy | test.improved_dummy_2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from test.improved_dummy_1 +NOTICE: 0 rows copied from test.improved_dummy_2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +SELECT pathman.drop_partitions('test.improved_dummy'); +NOTICE: 0 rows copied from p1 +NOTICE: 0 rows copied from p2 + drop_partitions +----------------- + 2 +(1 row) + +SELECT pathman.create_range_partitions('test.improved_dummy', 'val', + pathman.generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}', + tablespaces := '{pg_default, pg_default}'); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman.pathman_partition_list +WHERE parent = 'test.improved_dummy'::REGCLASS +ORDER BY partition; + parent | partition | parttype | expr | range_min | range_max 
+---------------------+-----------+----------+------+-----------+----------- + test.improved_dummy | p1 | 2 | val | 1 | 2 + test.improved_dummy | p2 | 2 | val | 2 | 3 +(2 rows) + +DROP TABLE test.improved_dummy CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test pathman_rel_pathlist_hook() with INSERT query */ +CREATE TABLE test.insert_into_select(val int NOT NULL); +INSERT INTO test.insert_into_select SELECT generate_series(1, 100); +SELECT pathman.create_range_partitions('test.insert_into_select', 'val', 1, 20); + create_range_partitions +------------------------- + 5 +(1 row) + +CREATE TABLE test.insert_into_select_copy (LIKE test.insert_into_select); /* INSERT INTO ... SELECT ... */ +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +---------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select_1 + -> Seq Scan on insert_into_select_2 + -> Seq Scan on insert_into_select_3 + -> Seq Scan on insert_into_select_4 + Filter: (val <= 80) +(7 rows) + +SELECT pathman.set_enable_parent('test.insert_into_select', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +INSERT INTO test.insert_into_select_copy +SELECT * FROM test.insert_into_select +WHERE val <= 80; + QUERY PLAN +------------------------------------------------------------------- + Insert on insert_into_select_copy + -> Append + -> Seq Scan on insert_into_select insert_into_select_1 + Filter: (val <= 80) + -> Seq Scan on insert_into_select_1 insert_into_select_2 + -> Seq Scan on insert_into_select_2 insert_into_select_3 + -> Seq Scan on insert_into_select_3 insert_into_select_4 + -> Seq Scan on insert_into_select_4 insert_into_select_5 + Filter: (val <= 80) +(9 rows) + +INSERT INTO test.insert_into_select_copy SELECT * FROM test.insert_into_select; +SELECT count(*) FROM test.insert_into_select_copy; + count +------- + 100 +(1 
row) + +DROP TABLE test.insert_into_select_copy, test.insert_into_select CASCADE; +NOTICE: drop cascades to 6 other objects +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_3 num_range_rel + Filter: (2500 = id) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_3 num_range_rel_1 + Filter: (2500 < id) + -> Seq Scan on num_range_rel_4 
num_range_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_3 num_range_rel_1 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 num_range_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_2 num_range_rel_1 + -> Seq Scan on num_range_rel_3 num_range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_2 num_range_rel_1 + Filter: (id >= 1500) + -> Seq Scan on num_range_rel_3 num_range_rel_2 + Filter: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on num_range_rel_1 + Filter: (id >= 500) + -> Seq Scan on num_range_rel_2 + Filter: (id < 1500) + -> Seq Scan on num_range_rel_3 + Filter: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + Filter: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + Filter: ('Sun Feb 15 00:00:00 2015'::timestamp without time zone < dt) + -> Seq Scan on range_rel_3 
range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +----------------------------------- + Seq Scan on range_rel_2 range_rel +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + Filter: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 range_rel_2 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +--------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_1 + Filter: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_2 + Filter: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 + Filter: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; + QUERY PLAN 
+--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (2 = value) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_1 + Filter: (value = 2) + -> Seq Scan on hash_rel_2 + Filter: (value = 1) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------ + Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel + Index Cond: (id = 2500) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_1 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 num_range_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_1 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 num_range_rel_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on num_range_rel_2 num_range_rel_1 + -> Seq Scan on num_range_rel_3 num_range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; + QUERY PLAN +-------------------------------------------------------------------------------- + 
Append + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 num_range_rel_1 + Index Cond: (id >= 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 num_range_rel_2 + Index Cond: (id < 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 500) + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + Index Cond: (id < 1500) + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id > 2500) + -> Seq Scan on num_range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; + QUERY PLAN +---------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 + Index Cond: (id <= 2500) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 + Index Cond: (dt > 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_3 range_rel_2 + -> Seq Scan on range_rel_4 range_rel_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; + QUERY PLAN +----------------------------------- + Seq Scan on range_rel_2 range_rel +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 range_rel_1 + Index Cond: (dt >= 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 range_rel_2 + Index Cond: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_2_dt_idx on range_rel_2 + Index Cond: (dt < 'Sun Feb 15 00:00:00 2015'::timestamp without time zone) + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + Index Cond: (dt > 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) + -> Seq Scan on range_rel_4 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index 
Scan using range_rel_2_dt_idx on range_rel_2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-01-15' ORDER BY dt DESC; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan Backward using range_rel_4_dt_idx on range_rel_4 + -> Index Scan Backward using range_rel_3_dt_idx on range_rel_3 + -> Index Scan Backward using range_rel_2_dt_idx on range_rel_2 + -> Index Scan Backward using range_rel_1_dt_idx on range_rel_1 + Index Cond: (dt >= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +/* + * Sorting + */ +SET enable_indexscan = OFF; +SET enable_seqscan = ON; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +------------------------------------- + Sort + Sort Key: range_rel_1.dt + -> Append + -> Seq Scan on range_rel_1 + -> Seq Scan on range_rel_2 +(5 rows) + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Append + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; + QUERY PLAN +---------------------------------------------------------- + Merge Append + Sort Key: range_rel_1.dt + -> Index Scan using range_rel_1_dt_idx on range_rel_1 + -> Index Scan using range_rel_2_dt_idx on 
range_rel_2 +(4 rows) + +/* + * Test inlined SQL functions + */ +CREATE TABLE test.sql_inline (id INT NOT NULL); +SELECT pathman.create_hash_partitions('test.sql_inline', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +CREATE OR REPLACE FUNCTION test.sql_inline_func(i_id int) RETURNS SETOF INT AS $$ + select * from test.sql_inline where id = i_id limit 1; +$$ LANGUAGE sql STABLE; +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(5); + QUERY PLAN +------------------------------------------- + Limit + -> Seq Scan on sql_inline_0 sql_inline + Filter: (id = 5) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.sql_inline_func(1); + QUERY PLAN +------------------------------------------- + Limit + -> Seq Scan on sql_inline_2 sql_inline + Filter: (id = 1) +(3 rows) + +DROP FUNCTION test.sql_inline_func(int); +DROP TABLE test.sql_inline CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test by @baiyinqiqi (issue #60) + */ +CREATE TABLE test.hash_varchar(val VARCHAR(40) NOT NULL); +INSERT INTO test.hash_varchar SELECT generate_series(1, 20); +SELECT pathman.create_hash_partitions('test.hash_varchar', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT * FROM test.hash_varchar WHERE val = 'a'; + val +----- +(0 rows) + +SELECT * FROM test.hash_varchar WHERE val = '12'::TEXT; + val +----- + 12 +(1 row) + +DROP TABLE test.hash_varchar CASCADE; +NOTICE: drop cascades to 4 other objects +/* + * Test split and merge + */ +/* Split first partition in half */ +SELECT pathman.split_range_partition('test.num_range_rel_1', 500); + split_range_partition +----------------------- + test.num_range_rel_5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 + Index Cond: (id >= 100) + -> Index Scan using 
num_range_rel_5_pkey on num_range_rel_5 num_range_rel_2 + Index Cond: (id <= 700) +(5 rows) + +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; + tableoid | id +----------------------+----- + test.num_range_rel_1 | 499 + test.num_range_rel_5 | 500 + test.num_range_rel_5 | 501 +(3 rows) + +SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); + split_range_partition +----------------------- + test.range_rel_5 +(1 row) + +/* Merge two partitions into one */ +SELECT pathman.merge_range_partitions('test.num_range_rel_1', 'test.num_range_rel_' || currval('test.num_range_rel_seq')); + merge_range_partitions +------------------------ + test.num_range_rel_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; + QUERY PLAN +------------------------------------------------------------------------ + Index Scan using num_range_rel_1_pkey on num_range_rel_1 num_range_rel + Index Cond: ((id >= 100) AND (id <= 700)) +(2 rows) + +SELECT pathman.merge_range_partitions('test.range_rel_1', 'test.range_rel_' || currval('test.range_rel_seq')); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +/* Append and prepend partitions */ +SELECT pathman.append_range_partition('test.num_range_rel'); + append_range_partition +------------------------ + test.num_range_rel_6 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 4000; + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_6 num_range_rel +(1 row) + +SELECT pathman.prepend_range_partition('test.num_range_rel'); + prepend_range_partition +------------------------- + test.num_range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id < 0; + QUERY PLAN +------------------------------------------- + Seq Scan on num_range_rel_7 num_range_rel +(1 row) + +SELECT pathman.drop_range_partition('test.num_range_rel_7'); + 
drop_range_partition +---------------------- + test.num_range_rel_7 +(1 row) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_4'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 + test.num_range_rel | test.num_range_rel_6 | 2 | id | 3000 | 5000 +(4 rows) + +SELECT pathman.drop_range_partition_expand_next('test.num_range_rel_6'); + drop_range_partition_expand_next +---------------------------------- + +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.num_range_rel'::regclass; + parent | partition | parttype | expr | range_min | range_max +--------------------+----------------------+----------+------+-----------+----------- + test.num_range_rel | test.num_range_rel_1 | 2 | id | 0 | 1000 + test.num_range_rel | test.num_range_rel_2 | 2 | id | 1000 | 2000 + test.num_range_rel | test.num_range_rel_3 | 2 | id | 2000 | 3000 +(3 rows) + +SELECT pathman.append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_6 +(1 row) + +SELECT pathman.prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_7_dt_idx on range_rel_7 range_rel_1 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time 
zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +SELECT pathman.drop_range_partition('test.range_rel_7'); + drop_range_partition +---------------------- + test.range_rel_7 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-02'::DATE); +ERROR: specified range [12-01-2014, 01-02-2015) overlaps with existing partitions +SELECT pathman.add_range_partition('test.range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + add_range_partition +--------------------- + test.range_rel_8 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-12-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_8_dt_idx on range_rel_8 range_rel_1 + Index Cond: (dt >= 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(5 rows) + +CREATE TABLE test.range_rel_archive (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2015-01-01'::DATE); +ERROR: specified range [01-01-2014, 01-01-2015) overlaps with existing partitions +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_archive', '2014-01-01'::DATE, '2014-12-01'::DATE); + attach_range_partition +------------------------ + test.range_rel_archive +(1 row) + 
+EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Scan using range_rel_archive_dt_idx on range_rel_archive range_rel_1 + Index Cond: (dt >= 'Sat Nov 15 00:00:00 2014'::timestamp without time zone) + -> Seq Scan on range_rel_8 range_rel_2 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_3 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(6 rows) + +SELECT pathman.detach_range_partition('test.range_rel_archive'); + detach_range_partition +------------------------ + test.range_rel_archive +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt BETWEEN '2014-11-15' AND '2015-01-15'; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_8 range_rel_1 + -> Index Scan using range_rel_1_dt_idx on range_rel_1 range_rel_2 + Index Cond: (dt <= 'Thu Jan 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +CREATE TABLE test.range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP, + txt TEXT, + abc INTEGER); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test1', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: partition must have a compatible tuple format +CREATE TABLE test.range_rel_test2 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_test2', '2013-01-01'::DATE, '2014-01-01'::DATE); +ERROR: column "dt" in child table must be marked NOT NULL +/* Half open ranges */ +SELECT pathman.add_range_partition('test.range_rel', NULL, '2014-12-01'::DATE, 'test.range_rel_minus_infinity'); + add_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT pathman.add_range_partition('test.range_rel', '2015-06-01'::DATE, NULL, 
'test.range_rel_plus_infinity'); + add_range_partition +------------------------------ + test.range_rel_plus_infinity +(1 row) + +SELECT pathman.append_range_partition('test.range_rel'); +ERROR: Cannot append partition because last partition's range is half open +SELECT pathman.prepend_range_partition('test.range_rel'); +ERROR: Cannot prepend partition because first partition's range is half open +DROP TABLE test.range_rel_minus_infinity; +CREATE TABLE test.range_rel_minus_infinity (LIKE test.range_rel INCLUDING ALL); +SELECT pathman.attach_range_partition('test.range_rel', 'test.range_rel_minus_infinity', NULL, '2014-12-01'::DATE); + attach_range_partition +------------------------------- + test.range_rel_minus_infinity +(1 row) + +SELECT * FROM pathman.pathman_partition_list WHERE parent = 'test.range_rel'::REGCLASS; + parent | partition | parttype | expr | range_min | range_max +----------------+-------------------------------+----------+------+--------------------------+-------------------------- + test.range_rel | test.range_rel_minus_infinity | 2 | dt | | Mon Dec 01 00:00:00 2014 + test.range_rel | test.range_rel_8 | 2 | dt | Mon Dec 01 00:00:00 2014 | Thu Jan 01 00:00:00 2015 + test.range_rel | test.range_rel_1 | 2 | dt | Thu Jan 01 00:00:00 2015 | Sun Feb 01 00:00:00 2015 + test.range_rel | test.range_rel_2 | 2 | dt | Sun Feb 01 00:00:00 2015 | Sun Mar 01 00:00:00 2015 + test.range_rel | test.range_rel_3 | 2 | dt | Sun Mar 01 00:00:00 2015 | Wed Apr 01 00:00:00 2015 + test.range_rel | test.range_rel_4 | 2 | dt | Wed Apr 01 00:00:00 2015 | Fri May 01 00:00:00 2015 + test.range_rel | test.range_rel_6 | 2 | dt | Fri May 01 00:00:00 2015 | Mon Jun 01 00:00:00 2015 + test.range_rel | test.range_rel_plus_infinity | 2 | dt | Mon Jun 01 00:00:00 2015 | +(8 rows) + +INSERT INTO test.range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO test.range_rel (dt) VALUES ('2015-12-15'); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-01-01'; + QUERY PLAN 
+-------------------------------------------------------- + Append + -> Seq Scan on range_rel_minus_infinity range_rel_1 + -> Seq Scan on range_rel_8 range_rel_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-05-01'; + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on range_rel_6 range_rel_1 + -> Seq Scan on range_rel_plus_infinity range_rel_2 +(3 rows) + +/* + * Zero partitions count and adding partitions with specified name + */ +CREATE TABLE test.zero( + id SERIAL PRIMARY KEY, + value INT NOT NULL); +INSERT INTO test.zero SELECT g, g FROM generate_series(1, 100) as g; +SELECT pathman.create_range_partitions('test.zero', 'value', 50, 10, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_0'); +ERROR: relation "zero" has no partitions +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_1'); +ERROR: relation "zero" has no partitions +SELECT pathman.add_range_partition('test.zero', 50, 70, 'test.zero_50'); + add_range_partition +--------------------- + test.zero_50 +(1 row) + +SELECT pathman.append_range_partition('test.zero', 'test.zero_appended'); + append_range_partition +------------------------ + test.zero_appended +(1 row) + +SELECT pathman.prepend_range_partition('test.zero', 'test.zero_prepended'); + prepend_range_partition +------------------------- + test.zero_prepended +(1 row) + +SELECT pathman.split_range_partition('test.zero_50', 60, 'test.zero_60'); + split_range_partition +----------------------- + test."test.zero_60" +(1 row) + +DROP TABLE test.zero CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Check that altering table columns doesn't break trigger + */ +ALTER TABLE test.hash_rel ADD COLUMN abc int; +INSERT INTO test.hash_rel (id, value, abc) VALUES (123, 456, 789); +SELECT * FROM test.hash_rel WHERE id = 123; + id | value | abc +-----+-------+----- + 123 | 456 | 
789 +(1 row) + +/* Test replacing hash partition */ +CREATE TABLE test.hash_rel_extern (LIKE test.hash_rel INCLUDING ALL); +SELECT pathman.replace_hash_partition('test.hash_rel_0', 'test.hash_rel_extern'); + replace_hash_partition +------------------------ + test.hash_rel_extern +(1 row) + +/* Check the consistency of test.hash_rel_0 and test.hash_rel_extern relations */ +EXPLAIN(COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on hash_rel_extern hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +SELECT parent, partition, parttype +FROM pathman.pathman_partition_list +WHERE parent='test.hash_rel'::regclass +ORDER BY 2; + parent | partition | parttype +---------------+----------------------+---------- + test.hash_rel | test.hash_rel_1 | 1 + test.hash_rel | test.hash_rel_2 | 1 + test.hash_rel | test.hash_rel_extern | 1 +(3 rows) + +SELECT c.oid::regclass::text, + array_agg(pg_get_indexdef(i.indexrelid)) AS indexes, + array_agg(pg_get_triggerdef(t.oid)) AS triggers +FROM pg_class c + LEFT JOIN pg_index i ON c.oid=i.indrelid + LEFT JOIN pg_trigger t ON c.oid=t.tgrelid +WHERE c.oid IN ('test.hash_rel_0'::regclass, 'test.hash_rel_extern'::regclass) +GROUP BY 1 ORDER BY 1; + oid | indexes | triggers +----------------------+---------------------------------------------------------------------------------------+---------- + test.hash_rel_0 | {"CREATE UNIQUE INDEX hash_rel_0_pkey ON test.hash_rel_0 USING btree (id)"} | {NULL} + test.hash_rel_extern | {"CREATE UNIQUE INDEX hash_rel_extern_pkey ON test.hash_rel_extern USING btree (id)"} | {NULL} +(2 rows) + +SELECT pathman.is_tuple_convertible('test.hash_rel_0', 'test.hash_rel_extern'); + is_tuple_convertible +---------------------- + t +(1 row) + +INSERT INTO test.hash_rel SELECT * FROM test.hash_rel_0; +DROP TABLE test.hash_rel_0; +/* Table with which we are replacing partition must have exact same 
structure */ +CREATE TABLE test.hash_rel_wrong( + id INTEGER NOT NULL, + value INTEGER); +SELECT pathman.replace_hash_partition('test.hash_rel_1', 'test.hash_rel_wrong'); +ERROR: column "value" in child table must be marked NOT NULL +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on hash_rel_extern hash_rel_1 + -> Seq Scan on hash_rel_1 hash_rel_2 + -> Seq Scan on hash_rel_2 hash_rel_3 +(4 rows) + +/* + * Clean up + */ +SELECT pathman.drop_partitions('test.hash_rel'); +NOTICE: 3 rows copied from test.hash_rel_1 +NOTICE: 2 rows copied from test.hash_rel_2 +NOTICE: 2 rows copied from test.hash_rel_extern + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 7 +(1 row) + +SELECT pathman.create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT pathman.drop_partitions('test.hash_rel', TRUE); + drop_partitions +----------------- + 3 +(1 row) + +SELECT COUNT(*) FROM ONLY test.hash_rel; + count +------- + 0 +(1 row) + +DROP TABLE test.hash_rel CASCADE; +SELECT pathman.drop_partitions('test.num_range_rel'); +NOTICE: 999 rows copied from test.num_range_rel_1 +NOTICE: 1000 rows copied from test.num_range_rel_2 +NOTICE: 1000 rows copied from test.num_range_rel_3 + drop_partitions +----------------- + 3 +(1 row) + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 10 other objects +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT 
reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; + reloptions | relpersistence +-----------------+---------------- + {fillfactor=70} | p +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* Test automatic partition creation */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT pathman.create_range_partitions('test.range_rel', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO test.range_rel (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 day'::interval); +INSERT INTO test.range_rel (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_14 range_rel + Filter: (dt = 'Mon Dec 15 00:00:00 2014'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2014-12-15'; + id | dt | data +-----+--------------------------+------ + 137 | Mon Dec 15 00:00:00 2014 | +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on range_rel_8 range_rel + Filter: (dt = 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(2 rows) + +SELECT * FROM test.range_rel WHERE dt = '2015-03-15'; + id | dt | data +----+--------------------------+------ + 74 | Sun Mar 15 00:00:00 2015 | +(1 row) + +SELECT pathman.set_auto('test.range_rel', false); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); 
+ERROR: no suitable partition for key 'Mon Jun 01 00:00:00 2015' +SELECT pathman.set_auto('test.range_rel', true); + set_auto +---------- + +(1 row) + +INSERT INTO test.range_rel (dt) VALUES ('2015-06-01'); +/* + * Test auto removing record from config on table DROP (but not on column drop + * as it used to be before version 1.2) + */ +ALTER TABLE test.range_rel DROP COLUMN data; +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +----------------+------+----------+---------------- + test.range_rel | dt | 2 | @ 10 days +(1 row) + +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 21 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval +---------+------+----------+---------------- +(0 rows) + +/* Check overlaps */ +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 1000, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4001, 5000); +ERROR: specified range [4001, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 4000, 5000); +ERROR: specified range [4000, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3999, 5000); +ERROR: specified range [3999, 5000) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 3000, 3500); +ERROR: specified range [3000, 3500) overlaps with existing partitions +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 999); + check_range_available +----------------------- + +(1 row) + +SELECT pathman.check_range_available('test.num_range_rel'::regclass, 0, 1000); + check_range_available +----------------------- + +(1 row) + +SELECT 
pathman.check_range_available('test.num_range_rel'::regclass, 0, 1001); +ERROR: specified range [0, 1001) overlaps with existing partitions +/* CaMeL cAsE table names and attributes */ +CREATE TABLE test."TeSt" (a INT NOT NULL, b INT); +SELECT pathman.create_hash_partitions('test.TeSt', 'a', 3); +ERROR: relation "test.test" does not exist at character 39 +SELECT pathman.create_hash_partitions('test."TeSt"', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO test."TeSt" VALUES (1, 1); +INSERT INTO test."TeSt" VALUES (2, 2); +INSERT INTO test."TeSt" VALUES (3, 3); +SELECT * FROM test."TeSt"; + a | b +---+--- + 3 | 3 + 2 | 2 + 1 | 1 +(3 rows) + +DROP TABLE test."TeSt" CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test."RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test."RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT pathman.append_range_partition('test."RangeRel"'); + append_range_partition +------------------------ + test."RangeRel_4" +(1 row) + +SELECT pathman.prepend_range_partition('test."RangeRel"'); + prepend_range_partition +------------------------- + test."RangeRel_5" +(1 row) + +SELECT pathman.merge_range_partitions('test."RangeRel_1"', 'test."RangeRel_' || currval('test."RangeRel_seq"') || '"'); + merge_range_partitions +------------------------ + test."RangeRel_1" +(1 row) + +SELECT pathman.split_range_partition('test."RangeRel_1"', '2015-01-01'::DATE); + split_range_partition +----------------------- + test."RangeRel_6" +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 6 other objects +SELECT * FROM pathman.pathman_config; + partrel | expr | parttype | range_interval 
+--------------------+------+----------+---------------- + test.num_range_rel | id | 2 | 1000 +(1 row) + +CREATE TABLE test."RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +SELECT pathman.create_range_partitions('test."RangeRel"', 'id', 1, 100, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test."RangeRel" CASCADE; +NOTICE: drop cascades to 4 other objects +DROP EXTENSION pg_pathman; +/* Test that everything works fine without schemas */ +CREATE EXTENSION pg_pathman; +/* Hash */ +CREATE TABLE test.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO test.hash_rel (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('test.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE id = 1234; + QUERY PLAN +----------------------------------------------------------------- + Append + -> Index Scan using hash_rel_0_pkey on hash_rel_0 hash_rel_1 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_1_pkey on hash_rel_1 hash_rel_2 + Index Cond: (id = 1234) + -> Index Scan using hash_rel_2_pkey on hash_rel_2 hash_rel_3 + Index Cond: (id = 1234) +(7 rows) + +/* Range */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') as g; +SELECT create_range_partitions('test.range_rel', 'dt', '2010-01-01'::date, '1 month'::interval, 12); + create_range_partitions +------------------------- + 12 +(1 row) + +SELECT merge_range_partitions('test.range_rel_1', 'test.range_rel_2'); + merge_range_partitions +------------------------ + test.range_rel_1 +(1 row) + +SELECT split_range_partition('test.range_rel_1', '2010-02-15'::date); + split_range_partition +----------------------- + test.range_rel_13 +(1 
row) + +SELECT append_range_partition('test.range_rel'); + append_range_partition +------------------------ + test.range_rel_14 +(1 row) + +SELECT prepend_range_partition('test.range_rel'); + prepend_range_partition +------------------------- + test.range_rel_15 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on range_rel_15 range_rel_1 + -> Seq Scan on range_rel_1 range_rel_2 + -> Seq Scan on range_rel_13 range_rel_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_12 range_rel_1 + Filter: (dt > 'Wed Dec 15 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on range_rel_14 range_rel_2 +(4 rows) + +/* Create range partitions from whole range */ +SELECT drop_partitions('test.range_rel'); +NOTICE: 45 rows copied from test.range_rel_1 +NOTICE: 31 rows copied from test.range_rel_3 +NOTICE: 30 rows copied from test.range_rel_4 +NOTICE: 31 rows copied from test.range_rel_5 +NOTICE: 30 rows copied from test.range_rel_6 +NOTICE: 31 rows copied from test.range_rel_7 +NOTICE: 31 rows copied from test.range_rel_8 +NOTICE: 30 rows copied from test.range_rel_9 +NOTICE: 31 rows copied from test.range_rel_10 +NOTICE: 30 rows copied from test.range_rel_11 +NOTICE: 31 rows copied from test.range_rel_12 +NOTICE: 14 rows copied from test.range_rel_13 +NOTICE: 0 rows copied from test.range_rel_14 +NOTICE: 0 rows copied from test.range_rel_15 + drop_partitions +----------------- + 14 +(1 row) + +/* Test NOT operator */ +CREATE TABLE bool_test(a INT NOT NULL, b BOOLEAN); +SELECT create_hash_partitions('bool_test', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO bool_test SELECT g, (g % 4) = 0 FROM generate_series(1, 100) AS g; +SELECT count(*) FROM 
bool_test; + count +------- + 100 +(1 row) + +SELECT count(*) FROM bool_test WHERE (b = true AND b = false); + count +------- + 0 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = false; /* 75 values */ + count +------- + 75 +(1 row) + +SELECT count(*) FROM bool_test WHERE b = true; /* 25 values */ + count +------- + 25 +(1 row) + +DROP TABLE bool_test CASCADE; +NOTICE: drop cascades to 3 other objects +/* Special test case (quals generation) -- fixing commit f603e6c5 */ +CREATE TABLE test.special_case_1_ind_o_s(val serial, comment text); +INSERT INTO test.special_case_1_ind_o_s SELECT generate_series(1, 200), NULL; +SELECT create_range_partitions('test.special_case_1_ind_o_s', 'val', 1, 50); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.special_case_1_ind_o_s_2 SELECT 75 FROM generate_series(1, 6000); +CREATE INDEX ON test.special_case_1_ind_o_s_2 (val, comment); +VACUUM ANALYZE test.special_case_1_ind_o_s_2; +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s special_case_1_ind_o_s_1 + Filter: ((val < 75) AND (comment = 'a'::text)) + -> Seq Scan on special_case_1_ind_o_s_1 special_case_1_ind_o_s_2 + Filter: (comment = 'a'::text) + 
-> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 special_case_1_ind_o_s_3 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(7 rows) + +SELECT set_enable_parent('test.special_case_1_ind_o_s', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test.special_case_1_ind_o_s WHERE val < 75 AND comment = 'a'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on special_case_1_ind_o_s_1 + Filter: (comment = 'a'::text) + -> Index Only Scan using special_case_1_ind_o_s_2_val_comment_idx on special_case_1_ind_o_s_2 + Index Cond: ((val < 75) AND (comment = 'a'::text)) +(5 rows) + +/* Test index scans on child relation under enable_parent is set */ +CREATE TABLE test.index_on_childs(c1 integer not null, c2 integer); +CREATE INDEX ON test.index_on_childs(c2); +INSERT INTO test.index_on_childs SELECT i, (random()*10000)::integer FROM generate_series(1, 10000) i; +SELECT create_range_partitions('test.index_on_childs', 'c1', 1, 1000, 0, false); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('test.index_on_childs', 1, 1000, 'test.index_on_childs_1_1k'); + add_range_partition +--------------------------- + test.index_on_childs_1_1k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_1k_2k'); + append_range_partition +---------------------------- + test.index_on_childs_1k_2k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_2k_3k'); + append_range_partition +---------------------------- + test.index_on_childs_2k_3k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 'test.index_on_childs_3k_4k'); + append_range_partition +---------------------------- + test.index_on_childs_3k_4k +(1 row) + +SELECT append_range_partition('test.index_on_childs', 
'test.index_on_childs_4k_5k'); + append_range_partition +---------------------------- + test.index_on_childs_4k_5k +(1 row) + +SELECT set_enable_parent('test.index_on_childs', true); + set_enable_parent +------------------- + +(1 row) + +VACUUM ANALYZE test.index_on_childs; +EXPLAIN (COSTS OFF) SELECT * FROM test.index_on_childs WHERE c1 > 100 AND c1 < 2500 AND c2 = 500; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Append + -> Index Scan using index_on_childs_c2_idx on index_on_childs index_on_childs_1 + Index Cond: (c2 = 500) + Filter: ((c1 > 100) AND (c1 < 2500)) + -> Index Scan using index_on_childs_1_1k_c2_idx on index_on_childs_1_1k index_on_childs_2 + Index Cond: (c2 = 500) + Filter: (c1 > 100) + -> Index Scan using index_on_childs_1k_2k_c2_idx on index_on_childs_1k_2k index_on_childs_3 + Index Cond: (c2 = 500) + -> Index Scan using index_on_childs_2k_3k_c2_idx on index_on_childs_2k_3k index_on_childs_4 + Index Cond: (c2 = 500) + Filter: (c1 < 2500) +(12 rows) + +/* Test create_range_partitions() + partition_names */ +CREATE TABLE test.provided_part_names(id INT NOT NULL); +INSERT INTO test.provided_part_names SELECT generate_series(1, 10); +SELECT create_hash_partitions('test.provided_part_names', 'id', 2, + partition_names := ARRAY['p1', 'p2']::TEXT[]); /* ok */ + create_hash_partitions +------------------------ + 2 +(1 row) + +/* list partitions */ +SELECT partition FROM pathman_partition_list +WHERE parent = 'test.provided_part_names'::REGCLASS +ORDER BY partition; + partition +----------- + p1 + p2 +(2 rows) + +DROP TABLE test.provided_part_names CASCADE; +NOTICE: drop cascades to 2 other objects +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); + create_range_partitions 
+------------------------- + 1 +(1 row) + +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; + id +---- + 1 +(1 row) + +SELECT * FROM test.mixinh_parent; +ERROR: could not expand partitioned table "mixinh_child1" +DROP TABLE test.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test.index_on_childs CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.mixinh_child1 CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_bgw.out b/expected/pathman_bgw.out index 3c955c05..4f2ad6b8 100644 --- a/expected/pathman_bgw.out +++ b/expected/pathman_bgw.out @@ -105,5 +105,142 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par DROP TABLE test_bgw.test_4 CASCADE; NOTICE: drop cascades to 4 other objects -DROP SCHEMA test_bgw CASCADE; +/* test error handling in BGW */ +CREATE TABLE test_bgw.test_5(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_5', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +CREATE OR REPLACE FUNCTION test_bgw.abort_xact(args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE EXCEPTION 'aborting xact!'; +END +$$ language plpgsql; +SELECT set_spawn_using_bgw('test_bgw.test_5', true); + set_spawn_using_bgw +--------------------- + +(1 row) + +SELECT set_init_callback('test_bgw.test_5', 'test_bgw.abort_xact(jsonb)'); + set_init_callback +------------------- + +(1 row) + +INSERT INTO test_bgw.test_5 VALUES 
(-100); +ERROR: attempt to spawn new partitions of relation "test_5" failed +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + parent | partition | parttype | expr | range_min | range_max +-----------------+-------------------+----------+------+-----------+----------- + test_bgw.test_5 | test_bgw.test_5_1 | 2 | val | 1 | 11 + test_bgw.test_5 | test_bgw.test_5_2 | 2 | val | 11 | 21 +(2 rows) + +DROP FUNCTION test_bgw.abort_xact(args JSONB); +DROP TABLE test_bgw.test_5 CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Tests for ConcurrentPartWorker + */ +CREATE TABLE test_bgw.conc_part(id INT4 NOT NULL); +INSERT INTO test_bgw.conc_part SELECT generate_series(1, 500); +SELECT create_hash_partitions('test_bgw.conc_part', 'id', 5, false); + create_hash_partitions +------------------------ + 5 +(1 row) + +BEGIN; +/* Also test FOR SHARE/UPDATE conflicts in BGW */ +SELECT * FROM test_bgw.conc_part ORDER BY id LIMIT 1 FOR SHARE; + id +---- + 1 +(1 row) + +/* Run partitioning bgworker */ +SELECT partition_table_concurrently('test_bgw.conc_part', 10, 1); +NOTICE: worker started, you can stop it with the following command: select public.stop_concurrent_part_task('conc_part'); + partition_table_concurrently +------------------------------ + +(1 row) + +/* Wait until bgworker starts */ +SELECT pg_sleep(1); + pg_sleep +---------- + +(1 row) + +ROLLBACK; +/* Wait until it finises */ +DO $$ +DECLARE + ops int8; + rows int8; + rows_old int8 := 0; + i int4 := 0; -- protect from endless loop +BEGIN + LOOP + -- get total number of processed rows + SELECT processed + FROM pathman_concurrent_part_tasks + WHERE relid = 'test_bgw.conc_part'::regclass + INTO rows; + + -- get number of partitioning tasks + GET DIAGNOSTICS ops = ROW_COUNT; + + IF ops > 0 THEN + PERFORM pg_sleep(0.2); + + ASSERT rows IS NOT NULL; + + IF rows_old = rows THEN + i = i + 1; + ELSIF rows < rows_old THEN + RAISE EXCEPTION 'rows is decreasing: new %, old %', rows, 
rows_old; + ELSIF rows > 500 THEN + RAISE EXCEPTION 'processed % rows', rows; + END IF; + ELSE + EXIT; -- exit loop + END IF; + + IF i > 500 THEN + RAISE WARNING 'looks like partitioning bgw is stuck!'; + EXIT; -- exit loop + END IF; + + rows_old = rows; + END LOOP; +END +$$ LANGUAGE plpgsql; +/* Check amount of tasks and rows in parent and partitions */ +SELECT count(*) FROM pathman_concurrent_part_tasks; + count +------- + 0 +(1 row) + +SELECT count(*) FROM ONLY test_bgw.conc_part; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_bgw.conc_part; + count +------- + 500 +(1 row) + +DROP TABLE test_bgw.conc_part CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test_bgw; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cache_pranks.out b/expected/pathman_cache_pranks.out new file mode 100644 index 00000000..278643ff --- /dev/null +++ b/expected/pathman_cache_pranks.out @@ -0,0 +1,230 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? 
+SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; +-- create it for further tests +CREATE EXTENSION pg_pathman; +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +ERROR: table "part_test" has no partitions +SELECT disable_pathman_for('part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 5 other 
objects +-- +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('part_test'); + append_range_partition +------------------------ + part_test_301 +(1 row) + +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "part_test" with existing children +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. +-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) 
+ +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +BEGIN; + BEGIN AUTONOMOUS; + DROP EXTENSION pg_pathman; + CREATE EXTENSION pg_pathman; + COMMIT; +COMMIT; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 3 other objects +-- finalize +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cache_pranks_1.out b/expected/pathman_cache_pranks_1.out new file mode 100644 index 00000000..4a3982a6 --- /dev/null +++ b/expected/pathman_cache_pranks_1.out @@ -0,0 +1,237 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? 
+SET search_path = 'public'; +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; +-- create it for further tests +CREATE EXTENSION pg_pathman; +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('part_test', 100); + set_interval +-------------- + +(1 row) + +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +ERROR: table "part_test" has no partitions +SELECT disable_pathman_for('part_test'); + disable_pathman_for +--------------------- + +(1 row) + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +SELECT add_to_pathman_config('part_test', 'val'); +ERROR: wrong constraint format for HASH partition "part_test_1" +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 5 other 
objects +-- +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 300 +(1 row) + +SELECT append_range_partition('part_test'); + append_range_partition +------------------------ + part_test_301 +(1 row) + +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +ERROR: cannot create partition with range (-inf, +inf) +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: can't partition table "part_test" with existing children +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 302 other objects +-- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. +-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + create_range_partitions +------------------------- + 10 +(1 row) 
+ +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +SET pg_pathman.enable = f; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +EXECUTE q(1); +ERROR: table "part_test" is not partitioned +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 11 other objects +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +EXECUTE q(1); + a | b +---+--- +(0 rows) + +BEGIN; + BEGIN AUTONOMOUS; +ERROR: syntax error at or near "AUTONOMOUS" at character 7 + DROP EXTENSION pg_pathman; +ERROR: current transaction is aborted, commands ignored until end of transaction block + CREATE EXTENSION pg_pathman; +ERROR: current transaction is aborted, commands ignored until end of transaction block + COMMIT; +COMMIT; +WARNING: there is no transaction in progress +EXECUTE q(1); + a | b +---+--- +(0 rows) + +DEALLOCATE q; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 3 other objects +-- finalize +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity.out b/expected/pathman_calamity.out index 14eca51d..b9421bde 100644 --- 
a/expected/pathman_calamity.out +++ b/expected/pathman_calamity.out @@ -1,4 +1,15 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA calamity; /* call for coverage test */ @@ -9,10 +20,10 @@ SELECT debug_capture(); (1 row) -SELECT get_pathman_lib_version(); - get_pathman_lib_version -------------------------- - 10400 +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 (1 row) set client_min_messages = NOTICE; @@ -280,21 +291,21 @@ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ (1 row) /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ ERROR: relation "1" does not exist -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ ERROR: 'partrel' should not be NULL -SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ ERROR: 'expression' should not be NULL -SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon', 'cooked_expr'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ ERROR: 'parttype' should not be NULL -SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ ERROR: interval 
should be NULL for HASH partitioned table -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ ERROR: failed to analyze partitioning expression "expr" -SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ -ERROR: unrecognized token: "cooked_expr" -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ ERROR: failed to analyze partitioning expression "EXPR" /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); @@ -309,7 +320,7 @@ SELECT validate_relname(NULL); ERROR: relation should not be NULL /* check function validate_expression() */ SELECT validate_expression(1::regclass, NULL); /* not ok */ -ERROR: relation "1" does not exist +ERROR: identifier "1" must be normal Oid SELECT validate_expression(NULL::regclass, NULL); /* not ok */ ERROR: 'relid' should not be NULL SELECT validate_expression('calamity.part_test', NULL); /* not ok */ @@ -398,36 +409,6 @@ SELECT build_check_constraint_name(NULL) IS NULL; t (1 row) -/* check function build_update_trigger_name() */ -SELECT build_update_trigger_name('calamity.part_test'); /* OK */ - build_update_trigger_name ---------------------------- - part_test_upd_trig -(1 row) - -SELECT build_update_trigger_name(0::REGCLASS); /* not ok */ -ERROR: relation "0" does not exist -SELECT build_update_trigger_name(NULL) IS NULL; - ?column? 
----------- - t -(1 row) - -/* check function build_update_trigger_func_name() */ -SELECT build_update_trigger_func_name('calamity.part_test'); /* OK */ - build_update_trigger_func_name ----------------------------------- - calamity.part_test_upd_trig_func -(1 row) - -SELECT build_update_trigger_func_name(0::REGCLASS); /* not ok */ -ERROR: relation "0" does not exist -SELECT build_update_trigger_func_name(NULL) IS NULL; - ?column? ----------- - t -(1 row) - /* check function build_sequence_name() */ SELECT build_sequence_name('calamity.part_test'); /* OK */ build_sequence_name @@ -445,19 +426,19 @@ SELECT build_sequence_name(NULL) IS NULL; /* check function partition_table_concurrently() */ SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ -ERROR: relation "1" has no partitions +ERROR: identifier "1" must be normal Oid SELECT partition_table_concurrently('pg_class', 0); /* not ok */ ERROR: 'batch_size' should not be less than 1 or greater than 10000 SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ ERROR: 'sleep_time' should not be less than 0.5 SELECT partition_table_concurrently('pg_class'); /* not ok */ -ERROR: relation "pg_class" has no partitions +ERROR: identifier "1259" must be normal Oid /* check function stop_concurrent_part_task() */ SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ ERROR: cannot find worker for relation "1" /* check function drop_range_partition_expand_next() */ SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ -ERROR: relation "pg_class" is not a partition +ERROR: identifier "1259" must be normal Oid SELECT drop_range_partition_expand_next(NULL) IS NULL; ?column? 
---------- @@ -512,14 +493,6 @@ WARNING: table "pg_class" is not partitioned (1 row) -SELECT has_update_trigger(NULL); - has_update_trigger --------------------- - -(1 row) - -SELECT has_update_trigger(0::REGCLASS); /* not ok */ -ERROR: relation "0" does not exist /* check invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ begin @@ -587,7 +560,7 @@ DROP FUNCTION calamity.dummy_cb(arg jsonb); SELECT add_to_pathman_config(NULL, 'val'); /* no table */ ERROR: 'parent_relid' should not be NULL SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ -ERROR: relation "0" does not exist +ERROR: identifier "0" must be normal Oid SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ ERROR: 'expression' should not be NULL SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ @@ -783,9 +756,9 @@ SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ DROP TABLE calamity.test_range_oid CASCADE; NOTICE: drop cascades to 2 other objects /* check function merge_range_partitions() */ -SELECT merge_range_partitions('{pg_class}'); /* not ok */ +SELECT merge_range_partitions('pg_class'); /* not ok */ ERROR: cannot merge partitions -SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ ERROR: cannot merge partitions CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); @@ -801,59 +774,40 @@ SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); 2 (1 row) -SELECT merge_range_partitions('{calamity.merge_test_a_1, - calamity.merge_test_b_1}'); /* not ok */ +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ ERROR: cannot merge partitions DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; NOTICE: drop cascades to 6 other objects -/* check 
function drop_triggers() */ -CREATE TABLE calamity.trig_test_tbl(val INT4 NOT NULL); -SELECT create_hash_partitions('calamity.trig_test_tbl', 'val', 2); - create_hash_partitions ------------------------- - 2 -(1 row) - -SELECT create_update_triggers('calamity.trig_test_tbl'); - create_update_triggers ------------------------- - -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; - count -------- - 1 -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - count -------- - 1 -(1 row) - -SELECT drop_triggers('calamity.trig_test_tbl'); /* OK */ - drop_triggers ---------------- - -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; - count -------- - 0 -(1 row) - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; - count -------- - 0 -(1 row) - -DROP TABLE calamity.trig_test_tbl CASCADE; +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; NOTICE: drop cascades to 2 other objects -DROP SCHEMA calamity CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter 
nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; DROP EXTENSION pg_pathman; /* * ------------------------------------- @@ -870,25 +824,25 @@ SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10 10 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 1 - partition parents cache | 0 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -916,25 +870,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries 
---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 1 - partition parents cache | 10 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -962,25 +916,25 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: drop cascades to 11 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- 
- maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) /* check that parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); @@ -1006,14 +960,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -> Seq Scan on test_pathman_cache_stats_10 (11 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); drop_range_partition @@ -1021,27 +975,27 @@ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); calamity.test_pathman_cache_stats_1 (1 row) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 9 - partition dispatch cache | 1 - partition parents cache | 0 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) DROP TABLE calamity.test_pathman_cache_stats CASCADE; NOTICE: 
drop cascades to 10 other objects -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 0 - partition dispatch cache | 0 - partition parents cache | 0 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; /* * ------------------------------------------ @@ -1069,11 +1023,11 @@ SHOW pg_pathman.enable; (1 row) SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ -ERROR: pg_pathman is not initialized yet +ERROR: pg_pathman is disabled SELECT * FROM pathman_partition_list; /* not ok */ ERROR: pg_pathman is not initialized yet SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ -ERROR: pg_pathman is not initialized yet +ERROR: pg_pathman is disabled EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ QUERY PLAN ------------------------------ @@ -1114,5 +1068,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_1.out b/expected/pathman_calamity_1.out new file mode 100644 index 00000000..6ca2e7dd --- /dev/null +++ b/expected/pathman_calamity_1.out @@ -0,0 +1,1072 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for 
inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + 
create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, 
NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf 
CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); 
+ build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table 
+SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: identifier "0" must be normal Oid +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint 
"pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +----------------------------- + Append + -> Seq Scan on part_ok_0 + -> Seq Scan on part_ok_1 + -> Seq Scan on part_ok_2 + -> Seq Scan on part_ok_3 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ +CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT 
create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ 
+ get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 6 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop 
cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM 
pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE 
calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on 
test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); +SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T 
LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_2.out b/expected/pathman_calamity_2.out new file mode 100644 index 00000000..fa3295f6 --- /dev/null +++ b/expected/pathman_calamity_2.out 
@@ -0,0 +1,1072 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT 
drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT 
create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation 
*/ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ +ERROR: invalid input syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT 
drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); +ERROR: no hash function for type calamity.part_test +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ 
+ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function 
get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: identifier "0" must be normal Oid +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +SELECT 
add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ 
+CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT 
get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 
6 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition 
bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + 
partition parents cache | 0 +(3 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); 
+SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on survivor survivor_1 + -> Seq Scan on survivor_1 survivor_2 + -> Seq Scan on survivor_2 survivor_3 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop 
cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_calamity_3.out b/expected/pathman_calamity_3.out new file mode 100644 index 00000000..a8879ef7 --- /dev/null +++ b/expected/pathman_calamity_3.out @@ -0,0 +1,1076 @@ +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA calamity; +/* call for coverage test */ +set client_min_messages = ERROR; +SELECT debug_capture(); + debug_capture +--------------- + +(1 row) + +SELECT pathman_version(); + pathman_version +----------------- + 1.5.12 +(1 row) + +set client_min_messages = NOTICE; +/* create table to be partitioned */ +CREATE TABLE calamity.part_test(val serial); +/* test pg_pathman's cache */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 + drop_partitions +----------------- + 3 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 
'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +SELECT drop_partitions('calamity.part_test'); +NOTICE: 10 rows copied from calamity.part_test_1 +NOTICE: 10 rows copied from calamity.part_test_2 +NOTICE: 10 rows copied from calamity.part_test_3 +NOTICE: 0 rows copied from calamity.part_test_4 + drop_partitions +----------------- + 4 +(1 row) + +SELECT count(*) FROM calamity.part_test; + count +------- + 30 +(1 row) + +DELETE FROM calamity.part_test; +/* test function create_single_range_partition() */ +SELECT create_single_range_partition(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT create_single_range_partition('pg_class', NULL::INT4, NULL); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +SELECT add_to_pathman_config('calamity.part_test', 'val'); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT create_single_range_partition('calamity.part_test', NULL::INT4, NULL); /* not ok */ +ERROR: table "part_test" is not partitioned by RANGE +DELETE FROM pathman_config WHERE partrel = 'calamity.part_test'::REGCLASS; +/* test function create_range_partitions_internal() */ +SELECT create_range_partitions_internal(NULL, '{}'::INT[], NULL, NULL); /* not ok */ 
+ERROR: 'parent_relid' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', + NULL::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' should not be NULL +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + '{part_1}'::TEXT[], NULL); /* not ok */ +ERROR: wrong length of 'partition_names' array +SELECT create_range_partitions_internal('calamity.part_test', '{1}'::INT[], + NULL, '{tblspc_1}'::TEXT[]); /* not ok */ +ERROR: wrong length of 'tablespaces' array +SELECT create_range_partitions_internal('calamity.part_test', + '{1, NULL}'::INT[], NULL, NULL); /* not ok */ +ERROR: only first bound can be NULL +SELECT create_range_partitions_internal('calamity.part_test', + '{2, 1}'::INT[], NULL, NULL); /* not ok */ +ERROR: 'bounds' array must be ascending +/* test function create_hash_partitions() */ +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[]::TEXT[]); /* not ok */ +ERROR: array should not be empty +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ 'p1', NULL ]::TEXT[]); /* not ok */ +ERROR: array should not contain NULLs +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY[ ['p1'], ['p2'] ]::TEXT[]); /* not ok */ +ERROR: array should contain only 1 dimension +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + partition_names := ARRAY['calamity.p1']::TEXT[]); /* not ok */ +ERROR: size of 'partition_names' must be equal to 'partitions_count' +SELECT create_hash_partitions('calamity.part_test', 'val', 2, + tablespaces := ARRAY['abcd']::TEXT[]); /* not ok */ +ERROR: size of 'tablespaces' must be equal to 'partitions_count' +/* test case when naming sequence does not exist */ +CREATE TABLE calamity.no_naming_seq(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.no_naming_seq', 'val', '100'); + add_to_pathman_config +----------------------- + t +(1 row) + +select 
add_range_partition(' calamity.no_naming_seq', 10, 20); +ERROR: auto naming sequence "no_naming_seq_seq" does not exist +DROP TABLE calamity.no_naming_seq CASCADE; +/* test (-inf, +inf) partition creation */ +CREATE TABLE calamity.double_inf(val INT4 NOT NULL); +SELECT add_to_pathman_config('calamity.double_inf', 'val', '10'); + add_to_pathman_config +----------------------- + t +(1 row) + +select add_range_partition('calamity.double_inf', NULL::INT4, NULL::INT4, + partition_name := 'double_inf_part'); +ERROR: cannot create partition with range (-inf, +inf) +DROP TABLE calamity.double_inf CASCADE; +/* test stub 'enable_parent' value for PATHMAN_CONFIG_PARAMS */ +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DELETE FROM pathman_config_params WHERE partrel = 'calamity.part_test'::regclass; +SELECT append_range_partition('calamity.part_test'); + append_range_partition +------------------------ + calamity.part_test_4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_test; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on part_test_1 + -> Seq Scan on part_test_2 + -> Seq Scan on part_test_3 + -> Seq Scan on part_test_4 +(5 rows) + +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 4 +(1 row) + +DELETE FROM calamity.part_test; +/* check function validate_interval_value() */ +SELECT set_interval('pg_catalog.pg_class', 100); /* not ok */ +ERROR: table "pg_class" is not partitioned by RANGE +INSERT INTO calamity.part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('calamity.part_test', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT set_interval('calamity.part_test', 100); /* ok */ + set_interval +-------------- + +(1 row) + +SELECT set_interval('calamity.part_test', 15.6); /* not ok */ 
+ERROR: invalid input syntax for type integer: "15.6" +SELECT set_interval('calamity.part_test', 'abc'::text); /* not ok */ +ERROR: invalid input syntax for type integer: "abc" +SELECT drop_partitions('calamity.part_test', true); + drop_partitions +----------------- + 3 +(1 row) + +DELETE FROM calamity.part_test; +/* check function build_hash_condition() */ +SELECT build_hash_condition('int4', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashint4(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('text', 'val', 10, 1); + build_hash_condition +------------------------------------------------- + public.get_hash_part_idx(hashtext(val), 10) = 1 +(1 row) + +SELECT build_hash_condition('int4', 'val', 1, 1); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('int4', 'val', 10, 20); +ERROR: 'partition_index' must be lower than 'partitions_count' +SELECT build_hash_condition('text', 'val', 10, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT build_hash_condition('calamity.part_test', 'val', 10, 1); + build_hash_condition +---------------------------------------------------- + public.get_hash_part_idx(hash_record(val), 10) = 1 +(1 row) + +/* check function build_range_condition() */ +SELECT build_range_condition(NULL, 'val', 10, 20); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT build_range_condition('calamity.part_test', NULL, 10, 20); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT build_range_condition('calamity.part_test', 'val', 10, 20); /* OK */ + build_range_condition +------------------------------ + ((val >= 10) AND (val < 20)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ + build_range_condition +----------------------- + ((val >= 10)) +(1 row) + +SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ + build_range_condition +----------------------- + ((val < 10)) +(1 row) + +/* check function validate_interval_value() */ +SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +ERROR: relation "1" does not exist +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +ERROR: 'partrel' should not be NULL +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +ERROR: 'parttype' should not be NULL +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +ERROR: interval should be NULL for HASH partitioned table +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +ERROR: failed to analyze partitioning expression "expr" +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ +ERROR: 
failed to analyze partitioning expression "EXPR" +/* check function validate_relname() */ +SELECT validate_relname('calamity.part_test'); + validate_relname +------------------ + +(1 row) + +SELECT validate_relname(1::REGCLASS); +ERROR: relation "1" does not exist +SELECT validate_relname(NULL); +ERROR: relation should not be NULL +/* check function validate_expression() */ +SELECT validate_expression(1::regclass, NULL); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT validate_expression(NULL::regclass, NULL); /* not ok */ +ERROR: 'relid' should not be NULL +SELECT validate_expression('calamity.part_test', NULL); /* not ok */ +ERROR: 'expression' should not be NULL +SELECT validate_expression('calamity.part_test', 'valval'); /* not ok */ +ERROR: failed to analyze partitioning expression "valval" +SELECT validate_expression('calamity.part_test', 'random()'); /* not ok */ +ERROR: failed to analyze partitioning expression "random()" +SELECT validate_expression('calamity.part_test', 'val'); /* OK */ + validate_expression +--------------------- + +(1 row) + +SELECT validate_expression('calamity.part_test', 'VaL'); /* OK */ + validate_expression +--------------------- + +(1 row) + +/* check function get_number_of_partitions() */ +SELECT get_number_of_partitions('calamity.part_test'); + get_number_of_partitions +-------------------------- + 0 +(1 row) + +SELECT get_number_of_partitions(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_parent_of_partition() */ +SELECT get_parent_of_partition('calamity.part_test'); +ERROR: "part_test" is not a partition +SELECT get_parent_of_partition(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function get_base_type() */ +CREATE DOMAIN calamity.test_domain AS INT4; +SELECT get_base_type('int4'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type('calamity.test_domain'::regtype); + get_base_type +--------------- + integer +(1 row) + +SELECT get_base_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function get_partition_key_type() */ +SELECT get_partition_key_type('calamity.part_test'); +ERROR: relation "part_test" has no partitions +SELECT get_partition_key_type(0::regclass); +ERROR: relation "0" has no partitions +SELECT get_partition_key_type(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_check_constraint_name() */ +SELECT build_check_constraint_name('calamity.part_test'); /* OK */ + build_check_constraint_name +----------------------------- + pathman_part_test_check +(1 row) + +SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ +ERROR: relation "0" does not exist +SELECT build_check_constraint_name(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function build_sequence_name() */ +SELECT build_sequence_name('calamity.part_test'); /* OK */ + build_sequence_name +------------------------ + calamity.part_test_seq +(1 row) + +SELECT build_sequence_name(1::REGCLASS); /* not ok */ +ERROR: relation "1" does not exist +SELECT build_sequence_name(NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +/* check function partition_table_concurrently() */ +SELECT partition_table_concurrently(1::REGCLASS); /* not ok */ +ERROR: identifier "1" must be normal Oid +SELECT partition_table_concurrently('pg_class', 0); /* not ok */ +ERROR: 'batch_size' should not be less than 1 or greater than 10000 +SELECT partition_table_concurrently('pg_class', 1, 1E-5); /* not ok */ +ERROR: 'sleep_time' should not be less than 0.5 +SELECT partition_table_concurrently('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +/* check function stop_concurrent_part_task() */ +SELECT stop_concurrent_part_task(1::REGCLASS); /* not ok */ +ERROR: cannot find worker for relation "1" +/* check function drop_range_partition_expand_next() */ +SELECT drop_range_partition_expand_next('pg_class'); /* not ok */ +ERROR: identifier "1259" must be normal Oid +SELECT drop_range_partition_expand_next(NULL) IS NULL; + ?column? +---------- + t +(1 row) + +/* check function generate_range_bounds() */ +SELECT generate_range_bounds(NULL, 100, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, NULL::INT4, 10) IS NULL; + ?column? +---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, NULL) IS NULL; + ?column? 
+---------- + t +(1 row) + +SELECT generate_range_bounds(0, 100, 0); /* not ok */ +ERROR: 'p_count' must be greater than zero +SELECT generate_range_bounds('a'::TEXT, 'test'::TEXT, 10); /* not ok */ +ERROR: cannot find operator +(text, text) +SELECT generate_range_bounds('a'::TEXT, '1 mon'::INTERVAL, 10); /* not ok */ +ERROR: cannot find operator +(text, interval) +SELECT generate_range_bounds(0::NUMERIC, 1::NUMERIC, 10); /* OK */ + generate_range_bounds +-------------------------- + {0,1,2,3,4,5,6,7,8,9,10} +(1 row) + +SELECT generate_range_bounds('1-jan-2017'::DATE, + '1 day'::INTERVAL, + 4); /* OK */ + generate_range_bounds +---------------------------------------------------------- + {01-01-2017,01-02-2017,01-03-2017,01-04-2017,01-05-2017} +(1 row) + +SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ +WARNING: table "pg_class" is not partitioned + check_range_available +----------------------- + +(1 row) + +/* check invoke_on_partition_created_callback() */ +CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ + begin + raise warning 'arg: %', arg::text; + end +$$ LANGUAGE plpgsql; +/* Invalid args */ +SELECT invoke_on_partition_created_callback(NULL, 'calamity.part_test', 1); +ERROR: 'parent_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', NULL, 1); +ERROR: 'partition_relid' should not be NULL +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 0); + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', 1); +ERROR: callback function 1 does not exist +SELECT invoke_on_partition_created_callback('calamity.part_test', 'calamity.part_test', NULL); + invoke_on_partition_created_callback 
+-------------------------------------- + +(1 row) + +/* HASH */ +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure); +WARNING: arg: {"parent": null, "parttype": "1", "partition": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +/* RANGE */ +SELECT invoke_on_partition_created_callback('calamity.part_test'::regclass, 'pg_class'::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": "part_test", "parttype": "2", "partition": "pg_class", "range_max": null, "range_min": null, "parent_schema": "calamity", "partition_schema": "pg_catalog"} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL::int, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, 1, NULL); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": null, "range_min": "1", "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +SELECT invoke_on_partition_created_callback(0::regclass, 1::regclass, 'calamity.dummy_cb(jsonb)'::regprocedure, NULL, 1); +WARNING: arg: {"parent": null, "parttype": "2", "partition": null, "range_max": "1", "range_min": null, "parent_schema": null, "partition_schema": null} + invoke_on_partition_created_callback +-------------------------------------- + +(1 row) + +DROP FUNCTION calamity.dummy_cb(arg jsonb); +/* 
check function add_to_pathman_config() -- PHASE #1 */ +SELECT add_to_pathman_config(NULL, 'val'); /* no table */ +ERROR: 'parent_relid' should not be NULL +SELECT add_to_pathman_config(0::REGCLASS, 'val'); /* no table (oid) */ +ERROR: identifier "0" must be normal Oid +SELECT add_to_pathman_config('calamity.part_test', NULL); /* no expr */ +ERROR: 'expression' should not be NULL +SELECT add_to_pathman_config('calamity.part_test', 'V_A_L'); /* wrong expr */ +ERROR: failed to analyze partitioning expression "V_A_L" +SELECT add_to_pathman_config('calamity.part_test', 'val'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT disable_pathman_for('calamity.part_test'); + disable_pathman_for +--------------------- + +(1 row) + +/* check function add_to_pathman_config() -- PHASE #2 */ +CREATE TABLE calamity.part_ok(val serial); +INSERT INTO calamity.part_ok SELECT generate_series(1, 2); +SELECT create_hash_partitions('calamity.part_ok', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +CREATE TABLE calamity.wrong_partition (LIKE calamity.part_test) INHERITS (calamity.part_test); /* wrong partition w\o constraints */ +NOTICE: merging column "val" with inherited definition +SELECT add_to_pathman_config('calamity.part_test', 'val'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +SELECT 
add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: constraint "pathman_wrong_partition_check" of partition "wrong_partition" does not exist +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val = 1 OR val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +ALTER TABLE calamity.wrong_partition +ADD CONSTRAINT pathman_wrong_partition_check +CHECK (val >= 10 AND val = 2); /* wrong constraint */ +SELECT add_to_pathman_config('calamity.part_test', 'val', '10'); +ERROR: wrong constraint format for RANGE partition "wrong_partition" +EXPLAIN (COSTS OFF) SELECT * FROM calamity.part_ok; /* check that pathman is enabled */ + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on part_ok_0 part_ok_1 + -> Seq Scan on part_ok_1 part_ok_2 + -> Seq Scan on part_ok_2 part_ok_3 + -> Seq Scan on part_ok_3 part_ok_4 +(5 rows) + +ALTER TABLE calamity.wrong_partition DROP CONSTRAINT pathman_wrong_partition_check; +/* check GUC variable */ +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + on +(1 row) + +/* check function create_hash_partitions_internal() (called for the 2nd time) */ 
+CREATE TABLE calamity.hash_two_times(val serial); +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: table "hash_two_times" is not partitioned +SELECT create_hash_partitions('calamity.hash_two_times', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT create_hash_partitions_internal('calamity.hash_two_times', 'val', 2); +ERROR: cannot add new HASH partitions +/* check function disable_pathman_for() */ +CREATE TABLE calamity.to_be_disabled(val INT NOT NULL); +SELECT create_hash_partitions('calamity.to_be_disabled', 'val', 3); /* add row to main config */ + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT set_enable_parent('calamity.to_be_disabled', true); /* add row to params */ + set_enable_parent +------------------- + +(1 row) + +SELECT disable_pathman_for('calamity.to_be_disabled'); /* should delete both rows */ + disable_pathman_for +--------------------- + +(1 row) + +SELECT count(*) FROM pathman_config WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pathman_config_params WHERE partrel = 'calamity.to_be_disabled'::REGCLASS; + count +------- + 0 +(1 row) + +/* check function get_part_range_by_idx() */ +CREATE TABLE calamity.test_range_idx(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_idx', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, 1, NULL::INT4); /* not ok */ +ERROR: 'parent_relid' should not be NULL +SELECT get_part_range('calamity.test_range_idx', NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_idx' should not be NULL +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_idx', -2, NULL::INT4); /* not ok */ +ERROR: negative indices other than -1 (last partition) are not allowed +SELECT 
get_part_range('calamity.test_range_idx', 4, NULL::INT4); /* not ok */ +ERROR: partition #4 does not exist (total amount is 1) +SELECT get_part_range('calamity.test_range_idx', 0, NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_idx CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function get_part_range_by_oid() */ +CREATE TABLE calamity.test_range_oid(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.test_range_oid', 'val', 1, 10, 1); + create_range_partitions +------------------------- + 1 +(1 row) + +SELECT get_part_range(NULL, NULL::INT4); /* not ok */ +ERROR: 'partition_relid' should not be NULL +SELECT get_part_range('pg_class', NULL::INT4); /* not ok */ +ERROR: relation "pg_class" is not a partition +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT2); /* not ok */ +ERROR: pg_typeof(dummy) should be integer +SELECT get_part_range('calamity.test_range_oid_1', NULL::INT4); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +DROP TABLE calamity.test_range_oid CASCADE; +NOTICE: drop cascades to 2 other objects +/* check function merge_range_partitions() */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +ERROR: cannot merge partitions +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ +ERROR: cannot merge partitions +CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); +CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); +SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ +ERROR: cannot merge partitions +DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +NOTICE: drop cascades to 
6 other objects +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +NOTICE: drop cascades to table calamity.wrong_partition +DROP TABLE calamity.part_ok CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE calamity.hash_two_times CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE calamity.to_be_disabled CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +SET pg_pathman.enable = true; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SET pg_pathman.enable = false; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +RESET pg_pathman.enable; +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------- + * Special tests (pathman_cache_stats) + * ------------------------------------- + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check that cache loading is lazy */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition 
bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* Change this setting for code coverage */ +SET pg_pathman.enable_bounds_cache = false; +/* check view pathman_cache_stats (bounds cache disabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + 
partition parents cache | 0 +(3 rows) + +/* Restore this GUC */ +SET pg_pathman.enable_bounds_cache = true; +/* check view pathman_cache_stats (bounds cache enabled) */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 11 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +/* check that parents cache has been flushed after partition was dropped */ +CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); +SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
calamity.test_pathman_cache_stats; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on test_pathman_cache_stats_1 + -> Seq Scan on test_pathman_cache_stats_2 + -> Seq Scan on test_pathman_cache_stats_3 + -> Seq Scan on test_pathman_cache_stats_4 + -> Seq Scan on test_pathman_cache_stats_5 + -> Seq Scan on test_pathman_cache_stats_6 + -> Seq Scan on test_pathman_cache_stats_7 + -> Seq Scan on test_pathman_cache_stats_8 + -> Seq Scan on test_pathman_cache_stats_9 + -> Seq Scan on test_pathman_cache_stats_10 +(11 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); + drop_range_partition +------------------------------------- + calamity.test_pathman_cache_stats_1 +(1 row) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 9 + partition parents cache | 9 +(3 rows) + +DROP TABLE calamity.test_pathman_cache_stats CASCADE; +NOTICE: drop cascades to 10 other objects +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 0 + partition parents cache | 0 +(3 rows) + +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; +/* + * ------------------------------------------ + * Special tests (uninitialized pg_pathman) + * ------------------------------------------ + */ +CREATE SCHEMA calamity; +CREATE EXTENSION pg_pathman; +/* check function pathman_cache_search_relid() */ +CREATE TABLE calamity.survivor(val INT NOT NULL); 
+SELECT create_range_partitions('calamity.survivor', 'val', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +DROP EXTENSION pg_pathman CASCADE; +SET pg_pathman.enable = f; /* DON'T LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been disabled +CREATE EXTENSION pg_pathman; +SHOW pg_pathman.enable; + pg_pathman.enable +------------------- + off +(1 row) + +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* not ok */ +ERROR: pg_pathman is disabled +SELECT * FROM pathman_partition_list; /* not ok */ +ERROR: pg_pathman is not initialized yet +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* not ok */ +ERROR: pg_pathman is disabled +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on survivor survivor_1 + -> Seq Scan on survivor_1 survivor_2 + -> Seq Scan on survivor_2 survivor_3 +(4 rows) + +SET pg_pathman.enable = t; /* LOAD CONFIG */ +NOTICE: RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes and some other options have been enabled +SELECT add_to_pathman_config('calamity.survivor', 'val', '10'); /* OK */ + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT * FROM pathman_partition_list; /* OK */ + parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + calamity.survivor | calamity.survivor_1 | 2 | val | 1 | 11 + calamity.survivor | calamity.survivor_2 | 2 | val | 11 | 21 +(2 rows) + +SELECT get_part_range('calamity.survivor', 0, NULL::INT); /* OK */ + get_part_range +---------------- + {1,11} +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ + QUERY PLAN +------------------------------ + Append + -> Seq Scan on survivor_1 + -> Seq Scan on survivor_2 +(3 rows) + +DROP TABLE calamity.survivor CASCADE; +NOTICE: drop 
cascades to 3 other objects +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_callbacks.out b/expected/pathman_callbacks.out index 2f8e0166..8427dae7 100644 --- a/expected/pathman_callbacks.out +++ b/expected/pathman_callbacks.out @@ -1,14 +1,15 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA callbacks; -/* Check callbacks */ +/* callback #1 */ CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN RAISE WARNING 'callback arg: %', args::TEXT; END $$ language plpgsql; -/* callback is in public namespace, must be schema-qualified */ +/* callback #2 */ CREATE OR REPLACE FUNCTION public.dummy_cb(args JSONB) RETURNS VOID AS $$ BEGIN @@ -184,15 +185,11 @@ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_2", INSERT INTO callbacks.abc VALUES (201, 0); /* +1 new partition */ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_3", "range_max": "301", "range_min": "201", "parent_schema": "callbacks", "partition_schema": "callbacks"} +BEGIN; DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); INSERT INTO callbacks.abc VALUES (301, 0); /* +0 new partitions (ERROR) */ ERROR: callback function "callbacks.abc_on_part_created_callback(jsonb)" does not exist -CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) -RETURNS VOID AS $$ -BEGIN - RAISE WARNING 'callback arg: %', args::TEXT; -END -$$ language plpgsql; +ROLLBACK; INSERT INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ WARNING: callback arg: {"parent": "abc", "parttype": "2", "partition": "abc_5", "range_max": "401", "range_min": "301", "parent_schema": "callbacks", "partition_schema": "callbacks"} DROP TABLE callbacks.abc CASCADE; @@ -211,22 +208,22 @@ CREATE OR REPLACE FUNCTION callbacks.rotation_callback(params jsonb) RETURNS VOID AS $$ DECLARE - relation regclass; + relation regclass; parent_rel 
regclass; BEGIN parent_rel := concat(params->>'partition_schema', '.', params->>'parent')::regclass; - -- drop "old" partitions - FOR relation IN (SELECT partition FROM + -- drop "old" partitions + FOR relation IN (SELECT partition FROM (SELECT partition, range_min::INT4 FROM pathman_partition_list WHERE parent = parent_rel ORDER BY range_min::INT4 DESC OFFSET 4) t -- remain 4 last partitions ORDER BY range_min) - LOOP - RAISE NOTICE 'dropping partition %', relation; - PERFORM drop_range_partition(relation); - END LOOP; + LOOP + RAISE NOTICE 'dropping partition %', relation; + PERFORM drop_range_partition(relation); + END LOOP; END $$ LANGUAGE plpgsql; SELECT * FROM pathman_partition_list @@ -414,6 +411,8 @@ ORDER BY range_min::INT4; DROP TABLE callbacks.abc CASCADE; NOTICE: drop cascades to 5 other objects -DROP SCHEMA callbacks CASCADE; -NOTICE: drop cascades to 2 other objects +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +DROP FUNCTION public.dummy_cb(jsonb); +DROP FUNCTION callbacks.rotation_callback(jsonb); +DROP SCHEMA callbacks; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_check.out b/expected/pathman_check.out new file mode 100644 index 00000000..e69de29b diff --git a/expected/pathman_column_type.out b/expected/pathman_column_type.out index 4382db1f..c77acbb2 100644 --- a/expected/pathman_column_type.out +++ b/expected/pathman_column_type.out @@ -1,3 +1,7 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. 
+ */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -19,45 +23,75 @@ SELECT * FROM test_column_type.test; ----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) -/* change column's type (should flush caches) */ +/* + * Get parsed and analyzed expression. + */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that parsed expression was cleared */ -SELECT partrel, cooked_expr FROM pathman_config; - partrel | cooked_expr 
------------------------+------------- - test_column_type.test | +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} (1 row) +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; val ----- (0 rows) -/* check that expression has been built */ -SELECT partrel, cooked_expr FROM pathman_config; - partrel | cooked_expr ------------------------+------------------------------------------------------------------------------------------------------------------------- - test_column_type.test | {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} -(1 row) - -SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 10 - partition dispatch cache | 1 - partition parents cache | 10 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition 
bounds cache | 10 + partition parents cache | 10 +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -101,14 +135,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 5 - partition dispatch cache | 1 - partition parents cache | 5 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) /* change column's type (should NOT work) */ ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; @@ -119,14 +153,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 5 - partition dispatch cache | 1 - partition parents cache | 5 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; @@ -136,14 +170,14 @@ SELECT * FROM test_column_type.test; ----+----- (0 rows) -SELECT context, entries FROM pathman_cache_stats ORDER BY context; - context | entries ---------------------------+--------- - maintenance | 0 - partition bounds cache | 5 - partition dispatch cache | 1 - partition parents cache | 5 -(4 rows) +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + 
maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -165,5 +199,5 @@ NOTICE: 0 rows copied from test_column_type.test_4 (1 row) DROP TABLE test_column_type.test CASCADE; -DROP SCHEMA test_column_type CASCADE; +DROP SCHEMA test_column_type; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_column_type_1.out b/expected/pathman_column_type_1.out new file mode 100644 index 00000000..06b61387 --- /dev/null +++ b/expected/pathman_column_type_1.out @@ -0,0 +1,203 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. + */ +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnosyn 
1 :varattnosyn 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 +NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. 
+ */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied 
from test_column_type.test_0 +NOTICE: 0 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +DROP SCHEMA test_column_type; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_column_type_2.out b/expected/pathman_column_type_2.out new file mode 100644 index 00000000..0fbd0793 --- /dev/null +++ b/expected/pathman_column_type_2.out @@ -0,0 +1,203 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_column_type; +/* + * RANGE partitioning. + */ +/* create new table (val int) */ +CREATE TABLE test_column_type.test(val INT4 NOT NULL); +SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key +--------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + integer +(1 row) + +/* change column's type (should also flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + get_partition_key_type +------------------------ + numeric +(1 row) + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); + get_partition_cooked_key +----------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); + get_cached_partition_cooked_key 
+----------------------------------------------------------------------------------------------------------------------------------------------- + {VAR :varno 1 :varattno 1 :vartype 1700 :vartypmod -1 :varcollid 0 :varnullingrels (b) :varlevelsup 0 :varnosyn 1 :varattnosyn 1 :location 8} +(1 row) + +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + val +----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 10 + partition parents cache | 10 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | val +-------------------------+----- + test_column_type.test_1 | 1 +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 +NOTICE: 0 rows copied from test_column_type.test_5 +NOTICE: 0 rows copied from test_column_type.test_6 +NOTICE: 0 rows copied from test_column_type.test_7 +NOTICE: 0 rows copied from test_column_type.test_8 +NOTICE: 0 rows copied from test_column_type.test_9 +NOTICE: 0 rows copied from test_column_type.test_10 + drop_partitions +----------------- + 10 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +/* + * HASH partitioning. 
+ */ +/* create new table (id int, val int) */ +CREATE TABLE test_column_type.test(id INT4 NOT NULL, val INT4); +SELECT create_hash_partitions('test_column_type.test', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* make sure that bounds and dispatch info has been cached */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should NOT work) */ +ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; +ERROR: cannot change type of column "id" of table "test" partitioned by HASH +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* change column's type (should flush caches) */ +ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; +/* make sure that everything works properly */ +SELECT * FROM test_column_type.test; + id | val +----+----- +(0 rows) + +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; + context | entries +-------------------------+--------- + maintenance | 0 + partition bounds cache | 5 + partition parents cache | 5 +(3 rows) + +/* check insert dispatching */ +INSERT INTO test_column_type.test VALUES (1); +SELECT tableoid::regclass, * FROM test_column_type.test; + tableoid | id | val +-------------------------+----+----- + test_column_type.test_0 | 1 | +(1 row) + +SELECT drop_partitions('test_column_type.test'); +NOTICE: 1 rows copied 
from test_column_type.test_0 +NOTICE: 0 rows copied from test_column_type.test_1 +NOTICE: 0 rows copied from test_column_type.test_2 +NOTICE: 0 rows copied from test_column_type.test_3 +NOTICE: 0 rows copied from test_column_type.test_4 + drop_partitions +----------------- + 5 +(1 row) + +DROP TABLE test_column_type.test CASCADE; +DROP SCHEMA test_column_type; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte.out b/expected/pathman_cte.out index c7edd5a4..33821ac0 100644 --- a/expected/pathman_cte.out +++ b/expected/pathman_cte.out @@ -1,10 +1,14 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_cte; -/* - * Test simple CTE queries - */ CREATE TABLE test_cte.range_rel ( id INT4, dt TIMESTAMP NOT NULL, @@ -267,6 +271,7 @@ SELECT * FROM test; (4 rows) -DROP SCHEMA test_cte CASCADE; -NOTICE: drop cascades to 3 other objects +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_1.out b/expected/pathman_cte_1.out new file mode 100644 index 00000000..5e30e188 --- /dev/null +++ b/expected/pathman_cte_1.out @@ -0,0 +1,266 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 + -> Seq Scan on range_rel_3 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +------------------------ + Seq Scan on hash_rel_1 + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + 
tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash Join + Hash Cond: ((t_2.id = cte_del_xacts_specdata.tid) AND (t_2.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_2 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(22 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT 
create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(15 rows) + +/* parent disabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + 
+INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_2.out b/expected/pathman_cte_2.out new file mode 100644 index 00000000..b9bf8730 --- /dev/null +++ b/expected/pathman_cte_2.out @@ -0,0 +1,253 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + -> Seq Scan on range_rel_3 range_rel_2 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE 
TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t_1 + Delete on cte_del_xacts_1 t_2 + Delete on cte_del_xacts_2 t_3 + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Append + -> Seq Scan on cte_del_xacts t_1 + -> Seq Scan on cte_del_xacts_1 t_2 + -> Seq Scan on cte_del_xacts_2 t_3 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(13 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t_1 + Delete on cte_del_xacts_1 t_2 + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Append + -> Seq Scan on cte_del_xacts t_1 + -> Seq Scan on cte_del_xacts_1 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(11 rows) + +/* parent disabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != 
test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_cte_3.out b/expected/pathman_cte_3.out new file mode 100644 index 
00000000..a7f3acd0 --- /dev/null +++ b/expected/pathman_cte_3.out @@ -0,0 +1,266 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_cte; +CREATE TABLE test_cte.range_rel ( + id INT4, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO test_cte.range_rel (dt, txt) +SELECT g, md5(g::TEXT) +FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT create_range_partitions('test_cte.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-15') +SELECT * FROM ttt; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_2 range_rel_1 + -> Seq Scan on range_rel_3 range_rel_2 + Filter: (dt < 'Sun Mar 15 00:00:00 2015'::timestamp without time zone) +(4 rows) + +DROP TABLE test_cte.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +CREATE TABLE test_cte.hash_rel ( + id INT4, + value INTEGER NOT NULL); +INSERT INTO test_cte.hash_rel VALUES (1, 1); +INSERT INTO test_cte.hash_rel VALUES (2, 2); +INSERT INTO test_cte.hash_rel VALUES (3, 3); +SELECT create_hash_partitions('test_cte.hash_rel', 'value', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* perform a query */ +EXPLAIN (COSTS OFF) + WITH ttt AS (SELECT * FROM test_cte.hash_rel WHERE value = 2) +SELECT * FROM ttt; + QUERY PLAN +--------------------------------- + Seq Scan on hash_rel_1 hash_rel + Filter: (value = 2) +(2 
rows) + +DROP TABLE test_cte.hash_rel CASCADE; +NOTICE: drop cascades to 3 other objects +/* + * Test CTE query - by @parihaaraka (add varno to WalkerContext) + */ +CREATE TABLE test_cte.cte_del_xacts (id BIGSERIAL PRIMARY KEY, pdate DATE NOT NULL); +INSERT INTO test_cte.cte_del_xacts (pdate) +SELECT gen_date +FROM generate_series('2016-01-01'::date, '2016-04-9'::date, '1 day') AS gen_date; +CREATE TABLE test_cte.cte_del_xacts_specdata +( + tid BIGINT PRIMARY KEY, + test_mode SMALLINT, + state_code SMALLINT NOT NULL DEFAULT 8, + regtime TIMESTAMP WITHOUT TIME ZONE NOT NULL +); +INSERT INTO test_cte.cte_del_xacts_specdata VALUES (1, 1, 1, current_timestamp); /* for subquery test */ +/* create 2 partitions */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '50 days'::interval); + create_range_partitions +------------------------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + Delete on cte_del_xacts_2 t_2 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash Join + Hash Cond: ((t_2.id = cte_del_xacts_specdata.tid) AND (t_2.pdate = 
(cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_2 t_2 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(22 rows) + +SELECT drop_partitions('test_cte.cte_del_xacts'); /* now drop partitions */ +NOTICE: 50 rows copied from test_cte.cte_del_xacts_1 +NOTICE: 50 rows copied from test_cte.cte_del_xacts_2 + drop_partitions +----------------- + 2 +(1 row) + +/* create 1 partition */ +SELECT create_range_partitions('test_cte.cte_del_xacts'::regclass, 'pdate', + '2016-01-01'::date, '1 year'::interval); + create_range_partitions +------------------------- + 1 +(1 row) + +/* parent enabled! */ +SELECT set_enable_parent('test_cte.cte_del_xacts', true); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts t + Delete on cte_del_xacts t + Delete on cte_del_xacts_1 t_1 + -> Hash Join + Hash Cond: ((cte_del_xacts_specdata.tid = t.id) AND ((cte_del_xacts_specdata.regtime)::date = t.pdate)) + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) + -> Hash + -> Seq Scan on cte_del_xacts t + -> Hash Join + Hash Cond: ((t_1.id = cte_del_xacts_specdata.tid) AND (t_1.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t_1 + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(15 rows) + +/* parent disabled! 
*/ +SELECT set_enable_parent('test_cte.cte_del_xacts', false); + set_enable_parent +------------------- + +(1 row) + +EXPLAIN (COSTS OFF) +WITH tmp AS ( + SELECT tid, test_mode, regtime::DATE AS pdate, state_code + FROM test_cte.cte_del_xacts_specdata) +DELETE FROM test_cte.cte_del_xacts t USING tmp +WHERE t.id = tmp.tid AND t.pdate = tmp.pdate AND tmp.test_mode > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Delete on cte_del_xacts_1 t + -> Hash Join + Hash Cond: ((t.id = cte_del_xacts_specdata.tid) AND (t.pdate = (cte_del_xacts_specdata.regtime)::date)) + -> Seq Scan on cte_del_xacts_1 t + -> Hash + -> Seq Scan on cte_del_xacts_specdata + Filter: (test_mode > 0) +(7 rows) + +/* create stub pl/PgSQL function */ +CREATE OR REPLACE FUNCTION test_cte.cte_del_xacts_stab(name TEXT) +RETURNS smallint AS +$$ +begin + return 2::smallint; +end +$$ +LANGUAGE plpgsql STABLE; +/* test subquery planning */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +/* test subquery planning (one more time) */ +WITH tmp AS ( + SELECT tid FROM test_cte.cte_del_xacts_specdata + WHERE state_code != test_cte.cte_del_xacts_stab('test')) +SELECT * FROM test_cte.cte_del_xacts t JOIN tmp ON t.id = tmp.tid; + id | pdate | tid +----+------------+----- + 1 | 01-01-2016 | 1 +(1 row) + +DROP FUNCTION test_cte.cte_del_xacts_stab(TEXT); +DROP TABLE test_cte.cte_del_xacts, test_cte.cte_del_xacts_specdata CASCADE; +NOTICE: drop cascades to 2 other objects +/* Test recursive CTE */ +CREATE TABLE test_cte.recursive_cte_test_tbl(id INT NOT NULL, name TEXT NOT NULL); +SELECT create_hash_partitions('test_cte.recursive_cte_test_tbl', 'id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + 
+INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||id FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 1) FROM generate_series(1,100) f(id); +INSERT INTO test_cte.recursive_cte_test_tbl (id, name) +SELECT id, 'name'||(id + 2) FROM generate_series(1,100) f(id); +SELECT * FROM test_cte.recursive_cte_test_tbl WHERE id = 5; + id | name +----+------- + 5 | name5 + 5 | name6 + 5 | name7 +(3 rows) + +WITH RECURSIVE test AS ( + SELECT min(name) AS name + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 + UNION ALL + SELECT (SELECT min(name) + FROM test_cte.recursive_cte_test_tbl + WHERE id = 5 AND name > test.name) + FROM test + WHERE name IS NOT NULL) +SELECT * FROM test; + name +------- + name5 + name6 + name7 + +(4 rows) + +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA test_cte; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_declarative.out b/expected/pathman_declarative.out new file mode 100644 index 00000000..2915ecfb --- /dev/null +++ b/expected/pathman_declarative.out @@ -0,0 +1,107 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +ERROR: "range_rel" is not partitioned +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max 
+----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 + test.range_rel | test.r2 | 2 | dt | 05-01-2015 | 06-01-2015 +(5 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | +Check constraints: + "pathman_r2_check" CHECK (dt >= '05-01-2015'::date AND dt < '06-01-2015'::date) +Inherits: test.range_rel + +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max 
+----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | + +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + Table "test.r4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+--------------------------------------------+---------+--------------+------------- + id | integer | | not null | nextval('test.range_rel_id_seq'::regclass) | plain | | + dt | date | | not null | | plain | | +Indexes: + "r4_pkey" PRIMARY KEY, btree (id) +Check constraints: + "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) +Inherits: test.range_rel + +/* Note: PG-10 doesn't support ATTACH PARTITION ... 
DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); +NOTICE: relation "nonexistent_table" does not exist, skipping +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; +NOTICE: relation "nonexistent_table" does not exist, skipping +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_declarative_1.out b/expected/pathman_declarative_1.out new file mode 100644 index 00000000..dede4941 --- /dev/null +++ b/expected/pathman_declarative_1.out @@ -0,0 +1,107 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +ERROR: table "range_rel" is not partitioned +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only 
supports queries for range partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 + test.range_rel | test.r2 | 2 | dt | 05-01-2015 | 06-01-2015 +(5 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | +Check constraints: + "pathman_r2_check" CHECK (dt >= '05-01-2015'::date AND dt < '06-01-2015'::date) +Inherits: test.range_rel + +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman.pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +----------------+------------------+----------+------+------------+------------ + test.range_rel | test.range_rel_1 | 2 | dt | 01-01-2015 | 02-01-2015 + test.range_rel | test.range_rel_2 | 2 | dt | 02-01-2015 | 03-01-2015 + test.range_rel | test.range_rel_3 | 2 | dt | 03-01-2015 | 04-01-2015 + test.range_rel | test.range_rel_4 | 2 | dt | 04-01-2015 | 05-01-2015 +(4 rows) + +\d+ test.r2; + Table "test.r2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | + dt | date | | not null | | plain | | + +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ERROR: pg_pathman only supports queries for range partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ERROR: specified range [05-01-2014, 06-01-2015) overlaps with existing partitions +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO ('2016-01-01'); +\d+ test.r4; + Table "test.r4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+--------------------------------------------+---------+--------------+------------- + id | integer | | not null | nextval('test.range_rel_id_seq'::regclass) | plain | | + dt | date | | not null | | plain | | +Indexes: + "r4_pkey" PRIMARY KEY, btree (id) +Check constraints: + "pathman_r4_check" CHECK (dt >= '06-01-2015'::date AND dt < '01-01-2016'::date) +Inherits: test.range_rel + +/* Note: PG-10 doesn't support ATTACH PARTITION ... 
DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); +NOTICE: relation "nonexistent_table" does not exist, skipping +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; +NOTICE: relation "nonexistent_table" does not exist, skipping +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 6 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_domains.out b/expected/pathman_domains.out index f78a73dc..cc32ce0c 100644 --- a/expected/pathman_domains.out +++ b/expected/pathman_domains.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA domains; CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); @@ -40,13 +41,13 @@ SELECT prepend_range_partition('domains.dom_table'); SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); merge_range_partitions ------------------------ - + domains.dom_table_1 (1 row) SELECT split_range_partition('domains.dom_table_1', 50); split_range_partition ----------------------- - {1,201} + domains.dom_table_14 (1 row) INSERT INTO domains.dom_table VALUES(1101); @@ -123,6 +124,8 @@ ORDER BY "partition"::TEXT; domains.dom_table | domains.dom_table_4 | 1 | val | | (5 rows) -DROP SCHEMA domains CASCADE; -NOTICE: drop cascades to 7 other objects +DROP TABLE domains.dom_table CASCADE; +NOTICE: drop cascades to 5 other objects +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_domains_1.out b/expected/pathman_domains_1.out new file mode 100644 index 00000000..aaa0867f --- /dev/null +++ b/expected/pathman_domains_1.out @@ -0,0 +1,131 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA domains; +CREATE DOMAIN domains.dom_test AS numeric CHECK (value < 1200); +CREATE TABLE 
domains.dom_table(val domains.dom_test NOT NULL); +INSERT INTO domains.dom_table SELECT generate_series(1, 999); +SELECT create_range_partitions('domains.dom_table', 'val', 1, 100); + create_range_partitions +------------------------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 250; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_1 + -> Seq Scan on dom_table_2 + -> Seq Scan on dom_table_3 + Filter: ((val)::numeric < '250'::numeric) +(5 rows) + +INSERT INTO domains.dom_table VALUES(1500); +ERROR: value for domain domains.dom_test violates check constraint "dom_test_check" +INSERT INTO domains.dom_table VALUES(-10); +SELECT append_range_partition('domains.dom_table'); + append_range_partition +------------------------ + domains.dom_table_12 +(1 row) + +SELECT prepend_range_partition('domains.dom_table'); + prepend_range_partition +------------------------- + domains.dom_table_13 +(1 row) + +SELECT merge_range_partitions('domains.dom_table_1', 'domains.dom_table_2'); + merge_range_partitions +------------------------ + domains.dom_table_1 +(1 row) + +SELECT split_range_partition('domains.dom_table_1', 50); + split_range_partition +----------------------- + domains.dom_table_14 +(1 row) + +INSERT INTO domains.dom_table VALUES(1101); +EXPLAIN (COSTS OFF) +SELECT * FROM domains.dom_table +WHERE val < 450; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on dom_table_13 dom_table_1 + -> Seq Scan on dom_table_11 dom_table_2 + -> Seq Scan on dom_table_1 dom_table_3 + -> Seq Scan on dom_table_14 dom_table_4 + -> Seq Scan on dom_table_3 dom_table_5 + -> Seq Scan on dom_table_4 dom_table_6 + -> Seq Scan on dom_table_5 dom_table_7 + Filter: ((val)::numeric < '450'::numeric) +(9 rows) + +SELECT * FROM pathman_partition_list +ORDER BY range_min::INT, range_max::INT; + parent | partition | parttype | expr | range_min | range_max 
+-------------------+----------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_13 | 2 | val | -199 | -99 + domains.dom_table | domains.dom_table_11 | 2 | val | -99 | 1 + domains.dom_table | domains.dom_table_1 | 2 | val | 1 | 50 + domains.dom_table | domains.dom_table_14 | 2 | val | 50 | 201 + domains.dom_table | domains.dom_table_3 | 2 | val | 201 | 301 + domains.dom_table | domains.dom_table_4 | 2 | val | 301 | 401 + domains.dom_table | domains.dom_table_5 | 2 | val | 401 | 501 + domains.dom_table | domains.dom_table_6 | 2 | val | 501 | 601 + domains.dom_table | domains.dom_table_7 | 2 | val | 601 | 701 + domains.dom_table | domains.dom_table_8 | 2 | val | 701 | 801 + domains.dom_table | domains.dom_table_9 | 2 | val | 801 | 901 + domains.dom_table | domains.dom_table_10 | 2 | val | 901 | 1001 + domains.dom_table | domains.dom_table_12 | 2 | val | 1001 | 1101 + domains.dom_table | domains.dom_table_15 | 2 | val | 1101 | 1201 +(14 rows) + +SELECT drop_partitions('domains.dom_table'); +NOTICE: 49 rows copied from domains.dom_table_1 +NOTICE: 100 rows copied from domains.dom_table_3 +NOTICE: 100 rows copied from domains.dom_table_4 +NOTICE: 100 rows copied from domains.dom_table_5 +NOTICE: 100 rows copied from domains.dom_table_6 +NOTICE: 100 rows copied from domains.dom_table_7 +NOTICE: 100 rows copied from domains.dom_table_8 +NOTICE: 100 rows copied from domains.dom_table_9 +NOTICE: 99 rows copied from domains.dom_table_10 +NOTICE: 1 rows copied from domains.dom_table_11 +NOTICE: 0 rows copied from domains.dom_table_12 +NOTICE: 0 rows copied from domains.dom_table_13 +NOTICE: 151 rows copied from domains.dom_table_14 +NOTICE: 1 rows copied from domains.dom_table_15 + drop_partitions +----------------- + 14 +(1 row) + +SELECT create_hash_partitions('domains.dom_table', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +SELECT * FROM pathman_partition_list +ORDER BY "partition"::TEXT; + 
parent | partition | parttype | expr | range_min | range_max +-------------------+---------------------+----------+------+-----------+----------- + domains.dom_table | domains.dom_table_0 | 1 | val | | + domains.dom_table | domains.dom_table_1 | 1 | val | | + domains.dom_table | domains.dom_table_2 | 1 | val | | + domains.dom_table | domains.dom_table_3 | 1 | val | | + domains.dom_table | domains.dom_table_4 | 1 | val | | +(5 rows) + +DROP TABLE domains.dom_table CASCADE; +NOTICE: drop cascades to 5 other objects +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_dropped_cols.out b/expected/pathman_dropped_cols.out new file mode 100644 index 00000000..826931d3 --- /dev/null +++ b/expected/pathman_dropped_cols.out @@ -0,0 +1,209 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA dropped_cols; +/* + * we should be able to manage tables with dropped columns + */ +create table test_range(a int, b int, key int not null); +alter table test_range drop column a; +select create_range_partitions('test_range', 'key', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +alter table test_range drop column b; +select prepend_range_partition('test_range'); + prepend_range_partition +------------------------- + test_range_3 +(1 row) + +select * from pathman_partition_list order by parent, partition; + parent | partition | parttype | expr | range_min | range_max +------------+--------------+----------+------+-----------+----------- + test_range | test_range_1 | 2 | key | 1 | 11 + test_range | test_range_2 | 2 | key | 11 | 21 + test_range | test_range_3 | 2 | key | -9 | 1 +(3 rows) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_1_check'; + pg_get_constraintdef +------------------------------- + CHECK (key >= 1 AND key < 11) +(1 row) + +select pg_get_constraintdef(oid, true) from 
pg_constraint where conname = 'pathman_test_range_3_check'; + pg_get_constraintdef +------------------------------------------ + CHECK (key >= '-9'::integer AND key < 1) +(1 row) + +drop table test_range cascade; +NOTICE: drop cascades to 4 other objects +create table test_hash(a int, b int, key int not null); +alter table test_hash drop column a; +select create_hash_partitions('test_hash', 'key', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +alter table test_hash drop column b; +create table test_dummy (like test_hash); +select replace_hash_partition('test_hash_2', 'test_dummy', true); + replace_hash_partition +------------------------ + test_dummy +(1 row) + +select * from pathman_partition_list order by parent, partition; + parent | partition | parttype | expr | range_min | range_max +-----------+-------------+----------+------+-----------+----------- + test_hash | test_hash_0 | 1 | key | | + test_hash | test_hash_1 | 1 | key | | + test_hash | test_dummy | 1 | key | | +(3 rows) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_hash_1_check'; + pg_get_constraintdef +------------------------------------------------- + CHECK (get_hash_part_idx(hashint4(key), 3) = 1) +(1 row) + +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; + pg_get_constraintdef +------------------------------------------------- + CHECK (get_hash_part_idx(hashint4(key), 3) = 2) +(1 row) + +drop table test_hash cascade; +NOTICE: drop cascades to 3 other objects +-- Yury Smirnov case +CREATE TABLE root_dict ( + id BIGSERIAL PRIMARY KEY NOT NULL, + root_id BIGINT NOT NULL, + start_date DATE, + num TEXT, + main TEXT, + dict_code TEXT, + dict_name TEXT, + edit_num TEXT, + edit_date DATE, + sign CHAR(4) +); +CREATE INDEX "root_dict_root_id_idx" ON "root_dict" ("root_id"); +DO +$$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM generate_series(1, 3) r + LOOP + FOR d IN 1..2 LOOP + 
INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES + (r.r, '2010-10-10'::date, 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + END LOOP; + END LOOP; +END +$$; +ALTER TABLE root_dict ADD COLUMN dict_id BIGINT DEFAULT 3; +ALTER TABLE root_dict DROP COLUMN dict_code, + DROP COLUMN dict_name, + DROP COLUMN sign; +SELECT create_hash_partitions('root_dict' :: REGCLASS, + 'root_id', + 3, + true); + create_hash_partitions +------------------------ + 3 +(1 row) + +VACUUM FULL ANALYZE "root_dict"; +SELECT set_enable_parent('root_dict' :: REGCLASS, FALSE); + set_enable_parent +------------------- + +(1 row) + +PREPARE getbyroot AS +SELECT + id, root_id, start_date, num, main, edit_num, edit_date, dict_id +FROM root_dict +WHERE root_id = $1; +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id 
+----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +-- errors usually start here +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXECUTE getbyroot(2); + id | root_id | start_date | num | main | edit_num | edit_date | dict_id +----+---------+------------+-------+------+----------+-----------+--------- + 3 | 2 | 10-10-2010 | num_1 | 2 | | | 3 + 4 | 2 | 10-10-2010 | num_2 | 1 | | | 3 +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); + QUERY PLAN +---------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (root_dict.root_id = $1) + -> Bitmap Heap Scan on root_dict_0 root_dict + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_0_root_id_idx + Index Cond: (root_id = $1) + -> Bitmap Heap Scan on root_dict_1 root_dict + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_1_root_id_idx + Index Cond: (root_id = $1) + -> Bitmap Heap Scan on root_dict_2 root_dict + Recheck Cond: (root_id = $1) + -> Bitmap Index Scan on root_dict_2_root_id_idx + Index Cond: (root_id = $1) +(14 rows) + +DEALLOCATE getbyroot; +DROP TABLE root_dict CASCADE; +NOTICE: drop cascades to 3 other objects +DROP SCHEMA dropped_cols; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions.out b/expected/pathman_expressions.out index 948fdd5e..cd629b8e 100644 --- a/expected/pathman_expressions.out +++ b/expected/pathman_expressions.out @@ -1,3 +1,12 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * 
------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -160,42 +169,38 @@ SELECT *, tableoid::REGCLASS FROM test_exprs.composite; (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; - QUERY PLAN ----------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------ Append -> Seq Scan on composite_1 -> Seq Scan on composite_2 -> Seq Scan on composite_3 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) (5 rows) EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; - QUERY PLAN ------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------- Append -> Seq Scan on composite_1 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) -> Seq Scan on composite_2 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) -> Seq Scan on composite_3 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) -> Seq Scan on composite_4 - Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) (9 rows) EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); - QUERY PLAN 
----------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------- Append -> Seq Scan on composite_1 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) -> Seq Scan on composite_2 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) -> Seq Scan on composite_3 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) - -> Seq Scan on composite_4 - Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) -(9 rows) + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) DROP TABLE test_exprs.composite CASCADE; NOTICE: drop cascades to 5 other objects @@ -234,7 +239,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using system attributes */ SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); ERROR: failed to analyze partitioning expression "xmin" @@ -244,7 +249,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using subqueries */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value, (select oid from pg_class limit 1)', @@ -256,7 +261,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function 
create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using mutable expression */ SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); ERROR: failed to analyze partitioning expression "random()" @@ -266,7 +271,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using broken parentheses */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); ERROR: failed to parse partitioning expression "value * value2))" @@ -276,7 +281,7 @@ CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Try using missing columns */ SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); ERROR: failed to analyze partitioning expression "value * value3" @@ -287,7 +292,7 @@ CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 4 at PERFORM +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ 
EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; @@ -371,7 +376,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM /* Try using mutable expression */ SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', '15 years'::INTERVAL, '1 year'::INTERVAL, 10); @@ -382,7 +387,7 @@ PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERF SQL statement "SELECT public.prepare_for_partitioning(parent_relid, expression, partition_data)" -PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 12 at PERFORM +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM /* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy SELECT * FROM test_exprs.canary WHERE val = 1; @@ -425,50 +430,12 @@ EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-0 Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) (3 rows) -SELECT create_update_triggers('test_exprs.range_rel'); - create_update_triggers ------------------------- - -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel; - count -------- - 65 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_1; - count -------- - 12 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_2; - count -------- - 12 -(1 row) - -UPDATE test_exprs.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; -/* counts in partitions should be changed */ -SELECT 
COUNT(*) FROM test_exprs.range_rel; - count -------- - 65 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_1; - count -------- - 10 -(1 row) - -SELECT COUNT(*) FROM test_exprs.range_rel_2; - count -------- - 24 -(1 row) - -DROP SCHEMA test_exprs CASCADE; -NOTICE: drop cascades to 24 other objects +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_1.out b/expected/pathman_expressions_1.out new file mode 100644 index 00000000..66e3ea75 --- /dev/null +++ b/expected/pathman_expressions_1.out @@ -0,0 +1,445 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val 
COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on canon_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO 
test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, '0'::text)::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < ROW(21, '0'::text)::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_2 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < ROW(21, 0)) + -> Seq Scan on composite_4 + Filter: (ROW(a, b)::test_exprs.composite < 
ROW(21, 0)) +(9 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT 
create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression 
"value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(3 rows) + +/* + * Test RANGE 
+ */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------------- + Insert on canary_copy + -> Append + -> Seq Scan on canary_0 + Filter: (val = 1) +(4 rows) + +\set VERBOSITY terse +SELECT 
create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(3 rows) + +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_2.out b/expected/pathman_expressions_2.out new file mode 100644 index 00000000..89bf24ef --- /dev/null +++ b/expected/pathman_expressions_2.out @@ -0,0 +1,436 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val 
COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------- + Seq Scan on canon_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO test_exprs.composite 
VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +-------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy (LIKE test_exprs.canary); 
+SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL statement "SELECT 
public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
+QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hash_rel_0 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------- + Seq Scan on hash_rel_0 + Filter: ((value * value2) = 5) +(2 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM 
generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +---------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +INSERT 
INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Seq Scan on range_rel_4 + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(2 rows) + +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_expressions_3.out b/expected/pathman_expressions_3.out new file mode 100644 index 00000000..eacb1009 --- /dev/null +++ b/expected/pathman_expressions_3.out @@ -0,0 +1,436 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_exprs; +/* + * Test partitioning expression canonicalization process + */ +CREATE TABLE test_exprs.canon(c JSONB NOT NULL); +SELECT create_range_partitions('test_exprs.canon', '(C->>''key'')::int8', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------------------- + ((c ->> 'key'::text))::bigint +(1 row) + +INSERT INTO test_exprs.canon VALUES ('{ "key": 2, "value": 0 }'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + c | tableoid +------------------------+-------------------- + {"key": 2, "value": 0} | test_exprs.canon_1 +(1 row) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 3 other objects +CREATE TABLE test_exprs.canon(val TEXT NOT NULL); +CREATE SEQUENCE test_exprs.canon_seq; +SELECT add_to_pathman_config('test_exprs.canon', 'VAL collate "C"', NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'a'::TEXT, 'b'); + add_range_partition +--------------------- + test_exprs.canon_1 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'b'::TEXT, 'c'); + add_range_partition +--------------------- + test_exprs.canon_2 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'c'::TEXT, 'd'); + add_range_partition +--------------------- + test_exprs.canon_3 +(1 row) + +SELECT add_range_partition('test_exprs.canon', 'd'::TEXT, 'e'); + add_range_partition +--------------------- + test_exprs.canon_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +------------------- + (val COLLATE "C") +(1 row) + +INSERT INTO test_exprs.canon VALUES ('b'); +SELECT *, tableoid::REGCLASS FROM test_exprs.canon; + val | tableoid +-----+-------------------- + b | test_exprs.canon_2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val 
COLLATE "C" < ALL (array['b', 'c']); + QUERY PLAN +--------------------------- + Seq Scan on canon_1 canon +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.canon WHERE val COLLATE "POSIX" < ALL (array['b', 'c']); + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on canon_1 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_2 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_3 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") + -> Seq Scan on canon_4 + Filter: ((val)::text < 'b'::text COLLATE "POSIX") +(9 rows) + +DROP TABLE test_exprs.canon CASCADE; +NOTICE: drop cascades to 5 other objects +/* + * Test composite key. + */ +CREATE TABLE test_exprs.composite(a INT4 NOT NULL, b TEXT NOT NULL); +CREATE SEQUENCE test_exprs.composite_seq; +SELECT add_to_pathman_config('test_exprs.composite', + '(a, b)::test_exprs.composite', + NULL); + add_to_pathman_config +----------------------- + t +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(1,a)'::test_exprs.composite, + '(10,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_1 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(10,a)'::test_exprs.composite, + '(20,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_2 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(20,a)'::test_exprs.composite, + '(30,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_3 +(1 row) + +SELECT add_range_partition('test_exprs.composite', + '(30,a)'::test_exprs.composite, + '(40,a)'::test_exprs.composite); + add_range_partition +------------------------ + test_exprs.composite_4 +(1 row) + +SELECT expr FROM pathman_config; /* check expression */ + expr +--------------------------------- + ROW(a, b)::test_exprs.composite +(1 row) + +INSERT INTO 
test_exprs.composite VALUES(2, 'a'); +INSERT INTO test_exprs.composite VALUES(11, 'a'); +INSERT INTO test_exprs.composite VALUES(2, 'b'); +INSERT INTO test_exprs.composite VALUES(50, 'b'); +ERROR: cannot spawn new partition for key '(50,b)' +SELECT *, tableoid::REGCLASS FROM test_exprs.composite; + a | b | tableoid +----+---+------------------------ + 2 | a | test_exprs.composite_1 + 2 | b | test_exprs.composite_1 + 11 | a | test_exprs.composite_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0)::test_exprs.composite; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::test_exprs.composite) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b) < (21, 0)::test_exprs.composite; + QUERY PLAN +-------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_2 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_3 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) + -> Seq Scan on composite_4 + Filter: (ROW(a, b) < '(21,0)'::test_exprs.composite) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.composite WHERE (a, b)::test_exprs.composite < (21, 0); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on composite_1 + -> Seq Scan on composite_2 + -> Seq Scan on composite_3 + Filter: (ROW(a, b)::test_exprs.composite < '(21,0)'::record) +(5 rows) + +DROP TABLE test_exprs.composite CASCADE; +NOTICE: drop cascades to 5 other objects +/* We use this rel to check 'pathman_hooks_enabled' */ +CREATE TABLE test_exprs.canary(val INT4 NOT NULL); +CREATE TABLE test_exprs.canary_copy 
(LIKE test_exprs.canary); +SELECT create_hash_partitions('test_exprs.canary', 'val', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +/* + * Test HASH + */ +CREATE TABLE test_exprs.hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL, + value2 INTEGER NOT NULL +); +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(1, 5) val; +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', '1 + 1', 4); +ERROR: failed to analyze partitioning expression "1 + 1" +DETAIL: partitioning expression should reference table "hash_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using system attributes */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'xmin', 4); +ERROR: failed to analyze partitioning expression "xmin" +DETAIL: system attributes are not supported +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using subqueries */ +SELECT create_hash_partitions('test_exprs.hash_rel', + 'value, (select oid from pg_class limit 1)', + 4); +ERROR: failed to analyze partitioning expression "value, (select oid from pg_class limit 1)" +DETAIL: subqueries are not allowed in partitioning expression +CONTEXT: SQL 
statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using mutable expression */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'random()', 4); +ERROR: failed to analyze partitioning expression "random()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using broken parentheses */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2))', 4); +ERROR: failed to parse partitioning expression "value * value2))" +DETAIL: syntax error at or near ")" +QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Try using missing columns */ +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value3', 4); +ERROR: failed to analyze partitioning expression "value * value3" +DETAIL: column "value3" does not exist +HINT: Perhaps you meant to reference the column "hash_rel.value" or the column "hash_rel.value2". 
+QUERY: SELECT public.validate_expression(parent_relid, expression) +CONTEXT: PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_hash_partitions(regclass,text,integer,boolean,text[],text[]) line 3 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +----------------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 canary + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_hash_partitions('test_exprs.hash_rel', 'value * value2', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 5 +(1 row) + +INSERT INTO test_exprs.hash_rel (value, value2) + SELECT val, val * 2 FROM generate_series(6, 10) val; +SELECT COUNT(*) FROM ONLY test_exprs.hash_rel; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM test_exprs.hash_rel; + count +------- + 10 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE value = 5; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on hash_rel_0 hash_rel_1 + Filter: (value = 5) + -> Seq Scan on hash_rel_1 hash_rel_2 + Filter: (value = 5) + -> Seq Scan on hash_rel_2 hash_rel_3 + Filter: (value = 5) + -> Seq Scan on hash_rel_3 hash_rel_4 + Filter: (value = 5) +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.hash_rel WHERE (value * value2) = 5; + QUERY PLAN +---------------------------------- + Seq Scan on hash_rel_0 hash_rel + Filter: ((value * value2) = 5) +(2 rows) + +/* + * Test RANGE + */ +CREATE TABLE test_exprs.range_rel (id SERIAL PRIMARY KEY, dt TIMESTAMP NOT NULL, txt TEXT); +INSERT INTO 
test_exprs.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2020-04-30', '1 month'::interval) as g; +\set VERBOSITY default +/* Try using constant expression */ +SELECT create_range_partitions('test_exprs.range_rel', '''16 years''::interval', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "'16 years'::interval" +DETAIL: partitioning expression should reference table "range_rel" +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Try using mutable expression */ +SELECT create_range_partitions('test_exprs.range_rel', 'RANDOM()', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); +ERROR: failed to analyze partitioning expression "RANDOM()" +DETAIL: functions in partitioning expression must be marked IMMUTABLE +CONTEXT: SQL statement "SELECT public.validate_expression(parent_relid, expression)" +PL/pgSQL function prepare_for_partitioning(regclass,text,boolean) line 9 at PERFORM +SQL statement "SELECT public.prepare_for_partitioning(parent_relid, + expression, + partition_data)" +PL/pgSQL function create_range_partitions(regclass,text,anyelement,interval,integer,boolean) line 11 at PERFORM +/* Check that 'pathman_hooks_enabled' is true (1 partition in plan) */ +EXPLAIN (COSTS OFF) INSERT INTO test_exprs.canary_copy +SELECT * FROM test_exprs.canary WHERE val = 1; + QUERY PLAN +----------------------------------- + Insert on canary_copy + -> Seq Scan on canary_0 canary + Filter: (val = 1) +(3 rows) + +\set VERBOSITY terse +SELECT create_range_partitions('test_exprs.range_rel', 'AGE(dt, ''2000-01-01''::DATE)', + '15 years'::INTERVAL, '1 year'::INTERVAL, 10); + 
create_range_partitions +------------------------- + 10 +(1 row) + +INSERT INTO test_exprs.range_rel_1 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +ERROR: new row for relation "range_rel_1" violates check constraint "pathman_range_rel_1_check" +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 4 +(1 row) + +INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('asdf')); +SELECT COUNT(*) FROM test_exprs.range_rel_6; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Seq Scan on range_rel_4 range_rel + Filter: (age(dt, 'Sat Jan 01 00:00:00 2000'::timestamp without time zone) = '@ 18 years'::interval) +(2 rows) + +DROP TABLE test_exprs.canary CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test_exprs.hash_rel CASCADE; +NOTICE: drop cascades to 4 other objects +DROP SCHEMA test_exprs; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_foreign_keys.out b/expected/pathman_foreign_keys.out index 00462c3d..34fc75ad 100644 --- a/expected/pathman_foreign_keys.out +++ b/expected/pathman_foreign_keys.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA fkeys; /* Check primary keys generation */ @@ -89,6 +90,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM fkeys.messages; DROP TABLE fkeys.messages, fkeys.replies CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA fkeys CASCADE; -NOTICE: drop cascades to 2 other objects +DROP TABLE fkeys.test_fkey CASCADE; +DROP TABLE fkeys.test_ref CASCADE; +DROP SCHEMA fkeys; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_gaps.out b/expected/pathman_gaps.out 
new file mode 100644 index 00000000..530beca9 --- /dev/null +++ b/expected/pathman_gaps.out @@ -0,0 +1,834 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 
51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 + Filter: (val = 21) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_1 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on 
test_1_3 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 + Filter: (val > 21) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_1_3 +(2 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val = 31) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on 
test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + Filter: (val > 11) + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on 
test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val > 31) + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val = 41) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + Filter: (val > 21) + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val > 41) + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val = 51) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan 
on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 +(5 rows) + 
+EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + Filter: (val > 21) + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on 
test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val > 51) + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; 
+NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_1.out b/expected/pathman_gaps_1.out new file mode 100644 index 00000000..b1c0ac34 --- /dev/null +++ b/expected/pathman_gaps_1.out @@ -0,0 +1,819 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + 
gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 | 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 + Filter: (val = 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM 
gaps.test_1 WHERE val <= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 + Filter: (val > 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +---------------------- + Seq Scan on test_1_3 +(1 row) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +---------------------- + Seq Scan on test_2_4 + Filter: (val = 31) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 
+(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + Filter: (val > 11) + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> 
Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + Filter: (val > 31) + -> Seq Scan on test_2_5 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_4 + -> Seq Scan on test_2_5 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +---------------------- + Seq Scan on test_3_5 + Filter: (val = 41) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + 
Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + Filter: (val > 21) + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on 
test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + Filter: (val > 41) + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_5 + -> Seq Scan on test_3_6 + -> Seq Scan on test_3_7 + -> Seq Scan on test_3_8 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +---------------------- + Seq Scan on test_4_6 + Filter: (val = 51) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) 
+ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY 
PLAN +----------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + Filter: (val > 21) + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) 
+ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + Filter: (val > 51) + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on test_4_6 + -> Seq Scan on test_4_7 + -> Seq Scan on test_4_8 + -> Seq Scan on test_4_9 + -> Seq Scan on test_4_10 + -> Seq Scan on test_4_11 +(7 rows) + +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; 
+NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_gaps_2.out b/expected/pathman_gaps_2.out new file mode 100644 index 00000000..b229be66 --- /dev/null +++ b/expected/pathman_gaps_2.out @@ -0,0 +1,819 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE gaps.test_1_2; +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); + create_range_partitions +------------------------- + 5 +(1 row) + +DROP TABLE gaps.test_2_3; +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); + create_range_partitions +------------------------- + 8 +(1 row) + +DROP TABLE gaps.test_3_4; +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); + create_range_partitions +------------------------- + 11 +(1 row) + +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +-------------+----------------+----------+------+-----------+----------- + gaps.test_1 | gaps.test_1_1 | 2 | val | 1 | 11 + gaps.test_1 | gaps.test_1_3 | 2 | val | 21 | 31 + gaps.test_2 | gaps.test_2_1 | 2 | val | 1 | 11 + gaps.test_2 | gaps.test_2_2 | 2 | val | 11 | 21 + gaps.test_2 | gaps.test_2_4 | 2 | val | 31 | 41 + gaps.test_2 | gaps.test_2_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_1 | 2 | val | 1 | 11 + gaps.test_3 | gaps.test_3_2 
| 2 | val | 11 | 21 + gaps.test_3 | gaps.test_3_3 | 2 | val | 21 | 31 + gaps.test_3 | gaps.test_3_5 | 2 | val | 41 | 51 + gaps.test_3 | gaps.test_3_6 | 2 | val | 51 | 61 + gaps.test_3 | gaps.test_3_7 | 2 | val | 61 | 71 + gaps.test_3 | gaps.test_3_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_1 | 2 | val | 1 | 11 + gaps.test_4 | gaps.test_4_2 | 2 | val | 11 | 21 + gaps.test_4 | gaps.test_4_3 | 2 | val | 21 | 31 + gaps.test_4 | gaps.test_4_6 | 2 | val | 51 | 61 + gaps.test_4 | gaps.test_4_7 | 2 | val | 61 | 71 + gaps.test_4 | gaps.test_4_8 | 2 | val | 71 | 81 + gaps.test_4 | gaps.test_4_9 | 2 | val | 81 | 91 + gaps.test_4 | gaps.test_4_10 | 2 | val | 91 | 101 + gaps.test_4 | gaps.test_4_11 | 2 | val | 101 | 111 +(22 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 + Filter: (val = 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_1 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * 
FROM gaps.test_1 WHERE val <= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_1_1 + -> Seq Scan on test_1_3 test_1_2 + Filter: (val <= 21) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 11; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 + Filter: (val > 21) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + QUERY PLAN +----------------------------- + Seq Scan on test_1_3 test_1 +(1 row) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + QUERY PLAN +----------------------------- + Seq Scan on test_2_4 test_2 + Filter: (val = 31) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 
WHERE val < 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 + Filter: (val <= 31) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_1 + -> Seq Scan on test_2_2 + -> Seq Scan on test_2_4 test_2_3 + -> Seq Scan on test_2_5 test_2_4 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_2 test_2_1 + Filter: (val > 11) + -> Seq Scan on test_2_4 test_2_2 + -> Seq Scan on test_2_5 test_2_3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_2 test_2_1 + -> Seq Scan on test_2_4 test_2_2 + -> Seq Scan on test_2_5 test_2_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 
WHERE val >= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + Filter: (val > 31) + -> Seq Scan on test_2_5 test_2_2 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_2_4 test_2_1 + -> Seq Scan on test_2_5 test_2_2 +(3 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + QUERY PLAN +----------------------------- + Seq Scan on test_3_5 test_3 + Filter: (val = 41) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 + Filter: (val <= 41) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_1 + -> Seq Scan on test_3_2 + -> Seq Scan on test_3_3 + -> Seq Scan on test_3_5 test_3_4 + -> Seq Scan on test_3_6 test_3_5 + Filter: (val <= 51) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_3 test_3_1 + Filter: (val > 21) + -> Seq Scan on test_3_5 test_3_2 + -> Seq Scan on test_3_6 test_3_3 + -> Seq Scan on test_3_7 test_3_4 + -> Seq Scan on test_3_8 test_3_5 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_3 test_3_1 + -> Seq Scan on test_3_5 test_3_2 + -> Seq Scan on test_3_6 test_3_3 + -> Seq Scan on test_3_7 test_3_4 + -> Seq Scan on test_3_8 test_3_5 
+(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + Filter: (val > 41) + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_3_5 test_3_1 + -> Seq Scan on test_3_6 test_3_2 + -> Seq Scan on test_3_7 test_3_3 + -> Seq Scan on test_3_8 test_3_4 +(5 rows) + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false 
+(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + QUERY PLAN +----------------------------- + Seq Scan on test_4_6 test_4 + Filter: (val = 51) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; + QUERY PLAN 
+---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 + Filter: (val <= 51) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + QUERY PLAN +------------------------------------- + Append + -> Seq Scan on test_4_1 + -> Seq Scan on test_4_2 + -> Seq Scan on test_4_3 + -> Seq Scan on test_4_6 test_4_4 + -> Seq Scan on test_4_7 test_4_5 + Filter: (val <= 61) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_3 test_4_1 + Filter: (val > 21) + -> Seq Scan on test_4_6 test_4_2 + -> Seq Scan on test_4_7 test_4_3 + -> Seq Scan on test_4_8 test_4_4 + -> Seq Scan on test_4_9 test_4_5 + -> Seq Scan on test_4_10 test_4_6 + -> Seq Scan on test_4_11 test_4_7 +(9 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_3 test_4_1 + -> Seq Scan on test_4_6 test_4_2 + -> Seq Scan on test_4_7 test_4_3 + -> Seq Scan on test_4_8 test_4_4 + -> Seq Scan on test_4_9 test_4_5 + -> Seq Scan on test_4_10 test_4_6 + -> Seq Scan on test_4_11 test_4_7 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 
WHERE val > 31; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 
test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + Filter: (val > 51) + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on test_4_6 test_4_1 + -> Seq Scan on test_4_7 test_4_2 + -> Seq Scan on test_4_8 test_4_3 + -> Seq Scan on test_4_9 test_4_4 + -> Seq Scan on test_4_10 test_4_5 + -> Seq Scan on test_4_11 test_4_6 +(7 rows) + +DROP TABLE gaps.test_1 CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE gaps.test_2 CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE gaps.test_3 CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE gaps.test_4 CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_hashjoin.out b/expected/pathman_hashjoin.out new file mode 100644 index 00000000..f5ebabdd --- /dev/null +++ b/expected/pathman_hashjoin.out @@ 
-0,0 +1,84 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j1.id = j2.id) + -> Hash Join + Hash Cond: (j3.id = j1.id) + -> Append + -> Index Scan using 
num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_1 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 +(20 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_1.out b/expected/pathman_hashjoin_1.out new file mode 100644 index 00000000..df6c0174 --- /dev/null +++ b/expected/pathman_hashjoin_1.out @@ -0,0 +1,84 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Hash Join + Hash Cond: (j2.id = j1.id) + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + -> Index Scan using range_rel_3_dt_idx on 
range_rel_3 j2_1 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_2 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_1 +(20 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_2.out b/expected/pathman_hashjoin_2.out new file mode 100644 index 00000000..69ea5762 --- /dev/null +++ b/expected/pathman_hashjoin_2.out @@ -0,0 +1,77 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; 
+VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(13 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_3.out b/expected/pathman_hashjoin_3.out new file mode 100644 index 00000000..e2c8903a --- /dev/null +++ b/expected/pathman_hashjoin_3.out @@ -0,0 +1,76 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(12 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 
other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_4.out b/expected/pathman_hashjoin_4.out new file mode 100644 index 00000000..e827628f --- /dev/null +++ b/expected/pathman_hashjoin_4.out @@ -0,0 +1,84 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on 
j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j1.id = j2.id) + -> Hash Join + Hash Cond: (j3.id = j1.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_2 + -> Hash + -> Append + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2_1 + -> Index Scan using range_rel_3_dt_idx on range_rel_3 j2_2 + -> Index Scan using range_rel_4_dt_idx on range_rel_4 j2_3 +(20 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_5.out b/expected/pathman_hashjoin_5.out new file mode 100644 index 00000000..c66a9306 --- /dev/null +++ b/expected/pathman_hashjoin_5.out @@ -0,0 +1,76 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 + Filter: (id IS NOT NULL) +(12 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 
other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_hashjoin_6.out b/expected/pathman_hashjoin_6.out new file mode 100644 index 00000000..1c57f49b --- /dev/null +++ b/expected/pathman_hashjoin_6.out @@ -0,0 +1,75 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on 
j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Hash Join + Hash Cond: (j3.id = j2.id) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 + -> Hash + -> Index Scan using range_rel_2_dt_idx on range_rel_2 j2 +(11 rows) + +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_inserts.out b/expected/pathman_inserts.out index 9e04ae26..16656f18 100644 --- a/expected/pathman_inserts.out +++ b/expected/pathman_inserts.out @@ -1,3 +1,7 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -853,6 +857,171 @@ NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. 
INSERTED ROW: 256 | 128 | test_inserts.storage_14 (27 rows) +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN 
+---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, storage_11.d, storage_11.e + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d, storage_11.e + -> Seq Scan on test_inserts.storage_1 + Output: storage_1.b, storage_1.d, storage_1.e + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d, storage_2.e + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d, storage_3.e + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d, storage_4.e + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d, storage_5.e + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d, storage_6.e + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d, storage_7.e + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b, storage_8.d, storage_8.e + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d, storage_9.e + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d, storage_10.e + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d, storage_12.e + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, storage_13.d, storage_13.e + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b, storage_14.d, storage_14.e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint + -> Result + Output: 
NULL::integer, storage_11.b, NULL::integer, storage_11.d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b, storage_11.d + -> Seq Scan on test_inserts.storage_1 + Output: storage_1.b, storage_1.d + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b, storage_2.d + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b, storage_3.d + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b, storage_4.d + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b, storage_5.d + -> Seq Scan on test_inserts.storage_6 + Output: storage_6.b, storage_6.d + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b, storage_7.d + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b, storage_8.d + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b, storage_9.d + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b, storage_10.d + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b, storage_12.d + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b, storage_13.d + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b, storage_14.d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Result + Output: NULL::integer, storage_11.b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 + Output: storage_11.b + -> Seq Scan on test_inserts.storage_1 + Output: storage_1.b + -> Seq Scan on test_inserts.storage_2 + Output: storage_2.b + -> Seq Scan on test_inserts.storage_3 + Output: storage_3.b + -> Seq Scan on test_inserts.storage_4 + Output: storage_4.b + -> Seq Scan on test_inserts.storage_5 + Output: storage_5.b + -> 
Seq Scan on test_inserts.storage_6 + Output: storage_6.b + -> Seq Scan on test_inserts.storage_7 + Output: storage_7.b + -> Seq Scan on test_inserts.storage_8 + Output: storage_8.b + -> Seq Scan on test_inserts.storage_9 + Output: storage_9.b + -> Seq Scan on test_inserts.storage_10 + Output: storage_10.b + -> Seq Scan on test_inserts.storage_12 + Output: storage_12.b + -> Seq Scan on test_inserts.storage_13 + Output: storage_13.b + -> Seq Scan on test_inserts.storage_14 + Output: storage_14.b +(34 rows) + /* test gap case (missing partition in between) */ CREATE TABLE test_inserts.test_gap(val INT NOT NULL); INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); @@ -867,6 +1036,40 @@ INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ ERROR: cannot spawn a partition DROP TABLE test_inserts.test_gap CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA test_inserts CASCADE; -NOTICE: drop cascades to 19 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_1.out b/expected/pathman_inserts_1.out new file mode 100644 index 00000000..3479c12d --- /dev/null +++ b/expected/pathman_inserts_1.out @@ -0,0 +1,1075 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* attach before and after insertion triggers to partitioned table */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. 
INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +/* set partition init callback that will add triggers to partitions */ +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +ERROR: ON CONFLICT clause is not supported with partitioned tables +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; +ERROR: ON CONFLICT clause is not supported with partitioned tables +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +NOTICE: BEFORE 
INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) + tableoid +------------------------- + test_inserts.storage_11 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+----------- + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. +(2 rows) + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) + ?column? +---------- + 3 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+------------ + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. + 3 | 0 | 0 | PREPEND... +(3 rows) + +/* cause an unique index conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. 
INSERTED ROW: (0,0,0,CONFLICT) +ERROR: duplicate key value violates unique constraint "storage_11_a_idx" +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_12 +(1 row) + +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +SELECT * FROM test_inserts.storage_12; /* direct access */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +SELECT * FROM test_inserts.storage_13; /* direct access */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) + b | c | d | ?column? +-----+---+-------------+---------- + 111 | 0 | DROP_COL_1. 
| 17 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) + tableoid +------------------------- + test_inserts.storage_13 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) + ?column? | b +----------+----- + 222 | 111 +(1 row) + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; +/* will have 2 columns (b, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +SELECT * FROM test_inserts.storage_14; /* direct access */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) + b | d +-----+------------- + 121 | DROP_COL_2. 
+(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) + tableoid +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) + ?column? | ?column? +------------------+---------- + DROP_COL_2...0_0 | 363 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) + ?column? +---------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) + generate_series +----------------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT get_partition_key('test_inserts.storage')); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) + get_partition_key +------------------- + b +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. 
INSERTED ROW: (121,query_4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + 1 | 2 | 3 | 4 +(1 row) + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + partition | range_min | range_max | count +-------------------------+-----------+-----------+------- + test_inserts.storage_11 | -9 | 1 | 4 + test_inserts.storage_1 | 1 | 11 | 4 + test_inserts.storage_2 | 11 | 21 | 4 + test_inserts.storage_3 | 21 | 31 | 4 + test_inserts.storage_4 | 31 | 41 | 4 + test_inserts.storage_5 | 41 | 51 | 4 + test_inserts.storage_6 | 51 | 61 | 4 + test_inserts.storage_7 | 61 | 71 | 4 + test_inserts.storage_8 | 71 | 81 | 4 + test_inserts.storage_9 | 81 | 91 | 4 + test_inserts.storage_10 | 91 | 101 | 4 + test_inserts.storage_12 | 101 | 111 | 3 + test_inserts.storage_13 | 111 | 121 | 3 + test_inserts.storage_14 | 121 | 131 | 2 +(14 rows) + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----------------+------------------------- + 0 | PREPEND. | test_inserts.storage_11 + 0 | PREPEND.. | test_inserts.storage_11 + 0 | PREPEND... 
| test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | 
test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 3 cols! | test_inserts.storage_12 + 111 | 3 cols as well! | test_inserts.storage_13 + 111 | DROP_COL_1. | test_inserts.storage_13 + 111 | DROP_COL_1.. | test_inserts.storage_13 + 111 | DROP_COL_1... | test_inserts.storage_13 + 121 | 2 cols! | test_inserts.storage_14 + 121 | DROP_COL_2. | test_inserts.storage_14 + 121 | DROP_COL_2.. | test_inserts.storage_14 + 121 | DROP_COL_2... 
| test_inserts.storage_14 + 121 | query_1 | test_inserts.storage_14 + 121 | query_2 | test_inserts.storage_14 + 121 | query_3 | test_inserts.storage_14 + 121 | query_4 | test_inserts.storage_14 +(116 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* one more time! */ +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----+------------------------- + -2 | -2 | test_inserts.storage_11 + -1 | -1 | test_inserts.storage_11 + 0 | 0 | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | 
test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 
| test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 101 | test_inserts.storage_12 + 102 | 102 | test_inserts.storage_12 + 103 | 103 | test_inserts.storage_12 + 104 | 104 | test_inserts.storage_12 + 105 | 105 | test_inserts.storage_12 + 106 | 106 | test_inserts.storage_12 + 107 | 107 | test_inserts.storage_12 + 108 | 108 | test_inserts.storage_12 + 109 | 109 | test_inserts.storage_12 + 110 | 110 | test_inserts.storage_12 + 111 | 111 | test_inserts.storage_13 + 112 | 112 | test_inserts.storage_13 + 113 | 113 | test_inserts.storage_13 + 114 | 114 | test_inserts.storage_13 + 115 | 115 | test_inserts.storage_13 + 116 | 116 | test_inserts.storage_13 + 117 | 117 | test_inserts.storage_13 + 118 | 118 | test_inserts.storage_13 + 119 | 119 | test_inserts.storage_13 + 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (6,6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (117,117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (106,106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (120,120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | e | tableoid +-----+-----+-----+------------------------- + -2 | -2 | -2 | test_inserts.storage_11 + -1 | -1 | -1 | test_inserts.storage_11 + 0 | 0 | 0 | test_inserts.storage_11 + 1 | 1 | 1 | test_inserts.storage_1 + 2 | 2 | 2 | test_inserts.storage_1 + 3 | 3 | 3 | test_inserts.storage_1 + 4 | 4 | 4 | test_inserts.storage_1 + 5 | 5 | 5 | test_inserts.storage_1 + 6 | 6 | 6 | test_inserts.storage_1 + 7 | 7 | 7 | test_inserts.storage_1 + 8 | 8 | 8 | test_inserts.storage_1 + 9 | 9 | 9 | test_inserts.storage_1 + 10 | 10 | 10 | test_inserts.storage_1 + 11 | 11 | 11 | test_inserts.storage_2 + 12 | 12 | 12 | test_inserts.storage_2 + 13 | 13 | 13 | test_inserts.storage_2 + 14 | 14 | 14 | test_inserts.storage_2 + 15 | 15 | 15 | test_inserts.storage_2 + 16 | 16 | 16 | test_inserts.storage_2 + 17 | 17 | 17 | test_inserts.storage_2 + 18 | 18 | 18 | test_inserts.storage_2 + 19 | 19 | 19 | test_inserts.storage_2 + 20 | 20 | 20 | test_inserts.storage_2 + 21 | 21 | 21 | test_inserts.storage_3 + 22 | 22 | 22 | test_inserts.storage_3 + 23 | 23 | 23 | test_inserts.storage_3 + 24 | 24 | 24 | test_inserts.storage_3 + 25 | 25 | 25 | test_inserts.storage_3 + 26 | 26 | 26 | test_inserts.storage_3 + 27 | 27 | 27 | test_inserts.storage_3 + 28 | 28 | 28 | test_inserts.storage_3 + 29 | 29 | 29 | test_inserts.storage_3 + 30 | 30 | 30 | test_inserts.storage_3 + 31 | 31 | 31 | test_inserts.storage_4 + 32 | 32 | 32 | test_inserts.storage_4 + 33 | 33 | 33 | test_inserts.storage_4 + 34 | 34 | 34 | test_inserts.storage_4 + 35 | 35 | 35 | test_inserts.storage_4 + 36 | 36 | 36 | test_inserts.storage_4 + 37 | 37 | 37 | test_inserts.storage_4 + 38 | 38 | 38 | test_inserts.storage_4 + 39 | 39 | 39 | test_inserts.storage_4 + 40 | 40 | 40 | test_inserts.storage_4 + 41 | 41 | 41 | test_inserts.storage_5 + 42 | 42 | 42 | test_inserts.storage_5 + 43 | 43 | 43 | test_inserts.storage_5 + 44 | 44 | 44 | 
test_inserts.storage_5 + 45 | 45 | 45 | test_inserts.storage_5 + 46 | 46 | 46 | test_inserts.storage_5 + 47 | 47 | 47 | test_inserts.storage_5 + 48 | 48 | 48 | test_inserts.storage_5 + 49 | 49 | 49 | test_inserts.storage_5 + 50 | 50 | 50 | test_inserts.storage_5 + 51 | 51 | 51 | test_inserts.storage_6 + 52 | 52 | 52 | test_inserts.storage_6 + 53 | 53 | 53 | test_inserts.storage_6 + 54 | 54 | 54 | test_inserts.storage_6 + 55 | 55 | 55 | test_inserts.storage_6 + 56 | 56 | 56 | test_inserts.storage_6 + 57 | 57 | 57 | test_inserts.storage_6 + 58 | 58 | 58 | test_inserts.storage_6 + 59 | 59 | 59 | test_inserts.storage_6 + 60 | 60 | 60 | test_inserts.storage_6 + 61 | 61 | 61 | test_inserts.storage_7 + 62 | 62 | 62 | test_inserts.storage_7 + 63 | 63 | 63 | test_inserts.storage_7 + 64 | 64 | 64 | test_inserts.storage_7 + 65 | 65 | 65 | test_inserts.storage_7 + 66 | 66 | 66 | test_inserts.storage_7 + 67 | 67 | 67 | test_inserts.storage_7 + 68 | 68 | 68 | test_inserts.storage_7 + 69 | 69 | 69 | test_inserts.storage_7 + 70 | 70 | 70 | test_inserts.storage_7 + 71 | 71 | 71 | test_inserts.storage_8 + 72 | 72 | 72 | test_inserts.storage_8 + 73 | 73 | 73 | test_inserts.storage_8 + 74 | 74 | 74 | test_inserts.storage_8 + 75 | 75 | 75 | test_inserts.storage_8 + 76 | 76 | 76 | test_inserts.storage_8 + 77 | 77 | 77 | test_inserts.storage_8 + 78 | 78 | 78 | test_inserts.storage_8 + 79 | 79 | 79 | test_inserts.storage_8 + 80 | 80 | 80 | test_inserts.storage_8 + 81 | 81 | 81 | test_inserts.storage_9 + 82 | 82 | 82 | test_inserts.storage_9 + 83 | 83 | 83 | test_inserts.storage_9 + 84 | 84 | 84 | test_inserts.storage_9 + 85 | 85 | 85 | test_inserts.storage_9 + 86 | 86 | 86 | test_inserts.storage_9 + 87 | 87 | 87 | test_inserts.storage_9 + 88 | 88 | 88 | test_inserts.storage_9 + 89 | 89 | 89 | test_inserts.storage_9 + 90 | 90 | 90 | test_inserts.storage_9 + 91 | 91 | 91 | test_inserts.storage_10 + 92 | 92 | 92 | test_inserts.storage_10 + 93 | 93 | 93 | test_inserts.storage_10 + 94 | 94 | 
94 | test_inserts.storage_10 + 95 | 95 | 95 | test_inserts.storage_10 + 96 | 96 | 96 | test_inserts.storage_10 + 97 | 97 | 97 | test_inserts.storage_10 + 98 | 98 | 98 | test_inserts.storage_10 + 99 | 99 | 99 | test_inserts.storage_10 + 100 | 100 | 100 | test_inserts.storage_10 + 101 | 101 | 101 | test_inserts.storage_12 + 102 | 102 | 102 | test_inserts.storage_12 + 103 | 103 | 103 | test_inserts.storage_12 + 104 | 104 | 104 | test_inserts.storage_12 + 105 | 105 | 105 | test_inserts.storage_12 + 106 | 106 | 106 | test_inserts.storage_12 + 107 | 107 | 107 | test_inserts.storage_12 + 108 | 108 | 108 | test_inserts.storage_12 + 109 | 109 | 109 | test_inserts.storage_12 + 110 | 110 | 110 | test_inserts.storage_12 + 111 | 111 | 111 | test_inserts.storage_13 + 112 | 112 | 112 | test_inserts.storage_13 + 113 | 113 | 113 | test_inserts.storage_13 + 114 | 114 | 114 | test_inserts.storage_13 + 115 | 115 | 115 | test_inserts.storage_13 + 116 | 116 | 116 | test_inserts.storage_13 + 117 | 117 | 117 | test_inserts.storage_13 + 118 | 118 | 118 | test_inserts.storage_13 + 119 | 119 | 119 | test_inserts.storage_13 + 120 | 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) + ?column? 
| b | tableoid +----------+-----+------------------------- + -4 | -2 | test_inserts.storage_11 + 6 | 3 | test_inserts.storage_1 + 16 | 8 | test_inserts.storage_1 + 26 | 13 | test_inserts.storage_2 + 36 | 18 | test_inserts.storage_2 + 46 | 23 | test_inserts.storage_3 + 56 | 28 | test_inserts.storage_3 + 66 | 33 | test_inserts.storage_4 + 76 | 38 | test_inserts.storage_4 + 86 | 43 | test_inserts.storage_5 + 96 | 48 | test_inserts.storage_5 + 106 | 53 | test_inserts.storage_6 + 116 | 58 | test_inserts.storage_6 + 126 | 63 | test_inserts.storage_7 + 136 | 68 | test_inserts.storage_7 + 146 | 73 | test_inserts.storage_8 + 156 | 78 | test_inserts.storage_8 + 166 | 83 | test_inserts.storage_9 + 176 | 88 | test_inserts.storage_9 + 186 | 93 | test_inserts.storage_10 + 196 | 98 | test_inserts.storage_10 + 206 | 103 | test_inserts.storage_12 + 216 | 108 | test_inserts.storage_12 + 226 | 113 | test_inserts.storage_13 + 236 | 118 | test_inserts.storage_13 + 246 | 123 | test_inserts.storage_14 + 256 | 128 | test_inserts.storage_14 +(27 rows) + +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i, NULL::integer, i, i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan 
(PartitionFilter) + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i, i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, b, NULL::integer, d, e + -> Append + -> Seq Scan on test_inserts.storage_11 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_1 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_2 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_3 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_4 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_5 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_6 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_7 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_8 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_9 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_10 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_12 storage + Output: b, d, e + 
-> Seq Scan on test_inserts.storage_13 storage + Output: b, d, e + -> Seq Scan on test_inserts.storage_14 storage + Output: b, d, e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +---------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint + -> Result + Output: NULL::integer, b, NULL::integer, d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage + Output: b, d + -> Seq Scan on test_inserts.storage_1 storage + Output: b, d + -> Seq Scan on test_inserts.storage_2 storage + Output: b, d + -> Seq Scan on test_inserts.storage_3 storage + Output: b, d + -> Seq Scan on test_inserts.storage_4 storage + Output: b, d + -> Seq Scan on test_inserts.storage_5 storage + Output: b, d + -> Seq Scan on test_inserts.storage_6 storage + Output: b, d + -> Seq Scan on test_inserts.storage_7 storage + Output: b, d + -> Seq Scan on test_inserts.storage_8 storage + Output: b, d + -> Seq Scan on test_inserts.storage_9 storage + Output: b, d + -> Seq Scan on test_inserts.storage_10 storage + Output: b, d + -> Seq Scan on test_inserts.storage_12 storage + Output: b, d + -> Seq Scan on test_inserts.storage_13 storage + Output: b, d + -> Seq Scan on test_inserts.storage_14 storage + Output: b, d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Result + Output: NULL::integer, b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage + Output: b + -> Seq Scan on 
test_inserts.storage_1 storage + Output: b + -> Seq Scan on test_inserts.storage_2 storage + Output: b + -> Seq Scan on test_inserts.storage_3 storage + Output: b + -> Seq Scan on test_inserts.storage_4 storage + Output: b + -> Seq Scan on test_inserts.storage_5 storage + Output: b + -> Seq Scan on test_inserts.storage_6 storage + Output: b + -> Seq Scan on test_inserts.storage_7 storage + Output: b + -> Seq Scan on test_inserts.storage_8 storage + Output: b + -> Seq Scan on test_inserts.storage_9 storage + Output: b + -> Seq Scan on test_inserts.storage_10 storage + Output: b + -> Seq Scan on test_inserts.storage_12 storage + Output: b + -> Seq Scan on test_inserts.storage_13 storage + Output: b + -> Seq Scan on test_inserts.storage_14 storage + Output: b +(34 rows) + +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +ERROR: cannot spawn a partition +DROP TABLE test_inserts.test_gap CASCADE; +NOTICE: drop cascades to 3 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_inserts_2.out b/expected/pathman_inserts_2.out new file mode 100644 index 00000000..3c31fc53 --- /dev/null +++ b/expected/pathman_inserts_2.out @@ -0,0 +1,1075 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_inserts; +/* create a partitioned table */ +CREATE TABLE test_inserts.storage(a INT4, b INT4 NOT NULL, c NUMERIC, d TEXT); +INSERT INTO test_inserts.storage SELECT i * 2, i, i, i::text FROM generate_series(1, 100) i; +CREATE UNIQUE INDEX ON test_inserts.storage(a); +SELECT create_range_partitions('test_inserts.storage', 'b', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* attach before and after insertion triggers to partitioned table */ +CREATE OR REPLACE FUNCTION test_inserts.print_cols_before_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'BEFORE INSERTION TRIGGER ON TABLE % HAS EXPIRED. 
INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION test_inserts.print_cols_after_change() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE 'AFTER INSERTION TRIGGER ON TABLE % HAS EXPIRED. INSERTED ROW: %', tg_table_name, new; + RETURN new; +END; +$$ LANGUAGE plpgsql; +/* set triggers on existing first partition and new generated partitions */ +CREATE TRIGGER print_new_row_before_insert BEFORE INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_before_change(); +CREATE TRIGGER print_new_row_after_insert AFTER INSERT ON test_inserts.storage_1 + FOR EACH ROW EXECUTE PROCEDURE test_inserts.print_cols_after_change(); +/* set partition init callback that will add triggers to partitions */ +CREATE OR REPLACE FUNCTION test_inserts.set_triggers(args jsonb) RETURNS VOID AS $$ +BEGIN + EXECUTE format('create trigger print_new_row_before_insert before insert on %s.%s + for each row execute procedure test_inserts.print_cols_before_change();', + args->>'partition_schema', args->>'partition'); + EXECUTE format('create trigger print_new_row_after_insert after insert on %s.%s + for each row execute procedure test_inserts.print_cols_after_change();', + args->>'partition_schema', args->>'partition'); +END; +$$ LANGUAGE plpgsql; +SELECT set_init_callback('test_inserts.storage', 'test_inserts.set_triggers(jsonb)'); + set_init_callback +------------------- + +(1 row) + +/* we don't support ON CONLICT */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_1') +ON CONFLICT (a) DO UPDATE SET a = 3; +ERROR: ON CONFLICT clause is not supported with partitioned tables +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'UNSUPPORTED_2') +ON CONFLICT (a) DO NOTHING; +ERROR: ON CONFLICT clause is not supported with partitioned tables +/* implicitly prepend a partition (no columns have been dropped yet) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'PREPEND.') RETURNING *; +NOTICE: BEFORE 
INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0,PREPEND.) + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+---------- + 0 | 0 | 0 | PREPEND. +(1 row) + +INSERT INTO test_inserts.storage VALUES(1, 0, 0, 'PREPEND..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (1,0,0,PREPEND..) + tableoid +------------------------- + test_inserts.storage_11 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+----------- + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. +(2 rows) + +INSERT INTO test_inserts.storage VALUES(3, 0, 0, 'PREPEND...') RETURNING a + b / 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (3,0,0,PREPEND...) + ?column? +---------- + 3 +(1 row) + +SELECT * FROM test_inserts.storage_11; + a | b | c | d +---+---+---+------------ + 0 | 0 | 0 | PREPEND. + 1 | 0 | 0 | PREPEND.. + 3 | 0 | 0 | PREPEND... +(3 rows) + +/* cause an unique index conflict (a = 0) */ +INSERT INTO test_inserts.storage VALUES(0, 0, 0, 'CONFLICT') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. 
INSERTED ROW: (0,0,0,CONFLICT) +ERROR: duplicate key value violates unique constraint "storage_11_a_idx" +/* drop first column */ +ALTER TABLE test_inserts.storage DROP COLUMN a CASCADE; +/* will have 3 columns (b, c, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_12 +(1 row) + +INSERT INTO test_inserts.storage (b, c, d) VALUES (101, 17, '3 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,17,"3 cols!") +SELECT * FROM test_inserts.storage_12; /* direct access */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 100; /* via parent */ + b | c | d +-----+----+--------- + 101 | 17 | 3 cols! +(1 row) + +/* spawn a new partition (b, c, d) */ +INSERT INTO test_inserts.storage (b, c, d) VALUES (111, 17, '3 cols as well!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,17,"3 cols as well!") +SELECT * FROM test_inserts.storage_13; /* direct access */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 110; /* via parent */ + b | c | d +-----+----+----------------- + 111 | 17 | 3 cols as well! +(1 row) + +/* column 'a' has been dropped */ +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1.') RETURNING *, 17; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1.) + b | c | d | ?column? +-----+---+-------------+---------- + 111 | 0 | DROP_COL_1. 
| 17 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1..) + tableoid +------------------------- + test_inserts.storage_13 +(1 row) + +INSERT INTO test_inserts.storage VALUES(111, 0, 'DROP_COL_1...') RETURNING b * 2, b; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,0,DROP_COL_1...) + ?column? | b +----------+----- + 222 | 111 +(1 row) + +/* drop third column */ +ALTER TABLE test_inserts.storage DROP COLUMN c CASCADE; +/* will have 2 columns (b, d) */ +SELECT append_range_partition('test_inserts.storage'); + append_range_partition +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage (b, d) VALUES (121, '2 cols!'); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,"2 cols!") +SELECT * FROM test_inserts.storage_14; /* direct access */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +SELECT * FROM test_inserts.storage WHERE b > 120; /* via parent */ + b | d +-----+--------- + 121 | 2 cols! +(1 row) + +/* column 'c' has been dropped */ +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2.') RETURNING *; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2.) + b | d +-----+------------- + 121 | DROP_COL_2. 
+(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2..') RETURNING tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2..) + tableoid +------------------------- + test_inserts.storage_14 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'DROP_COL_2...') RETURNING d || '0_0', b * 3; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,DROP_COL_2...) + ?column? | ?column? +------------------+---------- + DROP_COL_2...0_0 | 363 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_1') +RETURNING (SELECT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_1) + ?column? +---------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_2') +RETURNING (SELECT generate_series(1, 10) LIMIT 1); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_2) + generate_series +----------------- + 1 +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_3') +RETURNING (SELECT get_partition_key('test_inserts.storage')); +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_3) + get_partition_key +------------------- + b +(1 row) + +INSERT INTO test_inserts.storage VALUES(121, 'query_4') +RETURNING 1, 2, 3, 4; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. 
INSERTED ROW: (121,query_4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (121,query_4) + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + 1 | 2 | 3 | 4 +(1 row) + +/* show number of columns in each partition */ +SELECT partition, range_min, range_max, count(partition) +FROM pathman_partition_list JOIN pg_attribute ON partition = attrelid +WHERE attnum > 0 +GROUP BY partition, range_min, range_max +ORDER BY range_min::INT4; + partition | range_min | range_max | count +-------------------------+-----------+-----------+------- + test_inserts.storage_11 | -9 | 1 | 4 + test_inserts.storage_1 | 1 | 11 | 4 + test_inserts.storage_2 | 11 | 21 | 4 + test_inserts.storage_3 | 21 | 31 | 4 + test_inserts.storage_4 | 31 | 41 | 4 + test_inserts.storage_5 | 41 | 51 | 4 + test_inserts.storage_6 | 51 | 61 | 4 + test_inserts.storage_7 | 61 | 71 | 4 + test_inserts.storage_8 | 71 | 81 | 4 + test_inserts.storage_9 | 81 | 91 | 4 + test_inserts.storage_10 | 91 | 101 | 4 + test_inserts.storage_12 | 101 | 111 | 3 + test_inserts.storage_13 | 111 | 121 | 3 + test_inserts.storage_14 | 121 | 131 | 2 +(14 rows) + +/* check the data */ +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----------------+------------------------- + 0 | PREPEND. | test_inserts.storage_11 + 0 | PREPEND.. | test_inserts.storage_11 + 0 | PREPEND... 
| test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | 
test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 | test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 3 cols! | test_inserts.storage_12 + 111 | 3 cols as well! | test_inserts.storage_13 + 111 | DROP_COL_1. | test_inserts.storage_13 + 111 | DROP_COL_1.. | test_inserts.storage_13 + 111 | DROP_COL_1... | test_inserts.storage_13 + 121 | 2 cols! | test_inserts.storage_14 + 121 | DROP_COL_2. | test_inserts.storage_14 + 121 | DROP_COL_2.. | test_inserts.storage_14 + 121 | DROP_COL_2... 
| test_inserts.storage_14 + 121 | query_1 | test_inserts.storage_14 + 121 | query_2 | test_inserts.storage_14 + 121 | query_3 | test_inserts.storage_14 + 121 | query_4 | test_inserts.storage_14 +(116 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* one more time! */ +INSERT INTO test_inserts.storage (b, d) SELECT i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | tableoid +-----+-----+------------------------- + -2 | -2 | test_inserts.storage_11 + -1 | -1 | test_inserts.storage_11 + 0 | 0 | test_inserts.storage_11 + 1 | 1 | test_inserts.storage_1 + 2 | 2 | test_inserts.storage_1 + 3 | 3 | test_inserts.storage_1 + 4 | 4 | test_inserts.storage_1 + 5 | 5 | test_inserts.storage_1 + 6 | 6 | test_inserts.storage_1 + 7 | 7 | test_inserts.storage_1 + 8 | 8 | test_inserts.storage_1 + 9 | 9 | test_inserts.storage_1 + 10 | 10 | test_inserts.storage_1 + 11 | 11 | test_inserts.storage_2 + 12 | 12 | test_inserts.storage_2 + 13 | 13 | test_inserts.storage_2 + 14 | 14 | test_inserts.storage_2 + 15 | 15 | test_inserts.storage_2 + 16 | 16 | test_inserts.storage_2 + 17 | 17 | test_inserts.storage_2 + 18 | 18 | test_inserts.storage_2 + 19 | 19 | test_inserts.storage_2 + 20 | 20 | test_inserts.storage_2 + 21 | 21 | test_inserts.storage_3 + 22 | 22 | test_inserts.storage_3 + 23 | 23 | test_inserts.storage_3 + 24 | 24 | test_inserts.storage_3 + 25 | 25 | test_inserts.storage_3 + 26 | 26 | test_inserts.storage_3 + 27 | 27 | test_inserts.storage_3 + 28 | 28 | test_inserts.storage_3 + 29 | 29 | test_inserts.storage_3 + 30 | 30 | test_inserts.storage_3 + 31 | 31 | test_inserts.storage_4 + 32 | 32 | test_inserts.storage_4 + 33 | 33 | test_inserts.storage_4 + 34 | 34 | test_inserts.storage_4 + 35 | 35 | test_inserts.storage_4 + 36 | 36 | test_inserts.storage_4 + 37 | 37 | test_inserts.storage_4 + 38 | 38 | test_inserts.storage_4 + 39 | 39 | test_inserts.storage_4 + 40 | 40 | 
test_inserts.storage_4 + 41 | 41 | test_inserts.storage_5 + 42 | 42 | test_inserts.storage_5 + 43 | 43 | test_inserts.storage_5 + 44 | 44 | test_inserts.storage_5 + 45 | 45 | test_inserts.storage_5 + 46 | 46 | test_inserts.storage_5 + 47 | 47 | test_inserts.storage_5 + 48 | 48 | test_inserts.storage_5 + 49 | 49 | test_inserts.storage_5 + 50 | 50 | test_inserts.storage_5 + 51 | 51 | test_inserts.storage_6 + 52 | 52 | test_inserts.storage_6 + 53 | 53 | test_inserts.storage_6 + 54 | 54 | test_inserts.storage_6 + 55 | 55 | test_inserts.storage_6 + 56 | 56 | test_inserts.storage_6 + 57 | 57 | test_inserts.storage_6 + 58 | 58 | test_inserts.storage_6 + 59 | 59 | test_inserts.storage_6 + 60 | 60 | test_inserts.storage_6 + 61 | 61 | test_inserts.storage_7 + 62 | 62 | test_inserts.storage_7 + 63 | 63 | test_inserts.storage_7 + 64 | 64 | test_inserts.storage_7 + 65 | 65 | test_inserts.storage_7 + 66 | 66 | test_inserts.storage_7 + 67 | 67 | test_inserts.storage_7 + 68 | 68 | test_inserts.storage_7 + 69 | 69 | test_inserts.storage_7 + 70 | 70 | test_inserts.storage_7 + 71 | 71 | test_inserts.storage_8 + 72 | 72 | test_inserts.storage_8 + 73 | 73 | test_inserts.storage_8 + 74 | 74 | test_inserts.storage_8 + 75 | 75 | test_inserts.storage_8 + 76 | 76 | test_inserts.storage_8 + 77 | 77 | test_inserts.storage_8 + 78 | 78 | test_inserts.storage_8 + 79 | 79 | test_inserts.storage_8 + 80 | 80 | test_inserts.storage_8 + 81 | 81 | test_inserts.storage_9 + 82 | 82 | test_inserts.storage_9 + 83 | 83 | test_inserts.storage_9 + 84 | 84 | test_inserts.storage_9 + 85 | 85 | test_inserts.storage_9 + 86 | 86 | test_inserts.storage_9 + 87 | 87 | test_inserts.storage_9 + 88 | 88 | test_inserts.storage_9 + 89 | 89 | test_inserts.storage_9 + 90 | 90 | test_inserts.storage_9 + 91 | 91 | test_inserts.storage_10 + 92 | 92 | test_inserts.storage_10 + 93 | 93 | test_inserts.storage_10 + 94 | 94 | test_inserts.storage_10 + 95 | 95 | test_inserts.storage_10 + 96 | 96 | test_inserts.storage_10 + 97 | 97 
| test_inserts.storage_10 + 98 | 98 | test_inserts.storage_10 + 99 | 99 | test_inserts.storage_10 + 100 | 100 | test_inserts.storage_10 + 101 | 101 | test_inserts.storage_12 + 102 | 102 | test_inserts.storage_12 + 103 | 103 | test_inserts.storage_12 + 104 | 104 | test_inserts.storage_12 + 105 | 105 | test_inserts.storage_12 + 106 | 106 | test_inserts.storage_12 + 107 | 107 | test_inserts.storage_12 + 108 | 108 | test_inserts.storage_12 + 109 | 109 | test_inserts.storage_12 + 110 | 110 | test_inserts.storage_12 + 111 | 111 | test_inserts.storage_13 + 112 | 112 | test_inserts.storage_13 + 113 | 113 | test_inserts.storage_13 + 114 | 114 | test_inserts.storage_13 + 115 | 115 | test_inserts.storage_13 + 116 | 116 | test_inserts.storage_13 + 117 | 117 | test_inserts.storage_13 + 118 | 118 | test_inserts.storage_13 + 119 | 119 | test_inserts.storage_13 + 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* add new column */ +ALTER TABLE test_inserts.storage ADD COLUMN e INT8 NOT NULL; +/* one more time! x2 */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i FROM generate_series(-2, 120) i; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. 
INSERTED ROW: (6,6,6) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (106,106,106) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (117,117,117) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (120,120,120) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-1,-1,-1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (0,0,0) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (1,1,1) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (2,2,2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (4,4,4) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (5,5,5) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (6,6,6) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (7,7,7) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (9,9,9) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (10,10,10) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (101,101,101) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (102,102,102) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (104,104,104) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (105,105,105) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. 
INSERTED ROW: (106,106,106) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (107,107,107) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (109,109,109) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (110,110,110) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (111,111,111) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (112,112,112) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (114,114,114) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (115,115,115) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (116,116,116) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (117,117,117) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (119,119,119) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (120,120,120) +SELECT *, tableoid::regclass FROM test_inserts.storage ORDER BY b, d; + b | d | e | tableoid +-----+-----+-----+------------------------- + -2 | -2 | -2 | test_inserts.storage_11 + -1 | -1 | -1 | test_inserts.storage_11 + 0 | 0 | 0 | test_inserts.storage_11 + 1 | 1 | 1 | test_inserts.storage_1 + 2 | 2 | 2 | test_inserts.storage_1 + 3 | 3 | 3 | test_inserts.storage_1 + 4 | 4 | 4 | test_inserts.storage_1 + 5 | 5 | 5 | test_inserts.storage_1 + 6 | 6 | 6 | test_inserts.storage_1 + 7 | 7 | 7 | test_inserts.storage_1 + 8 | 8 | 8 | test_inserts.storage_1 + 9 | 9 | 9 | test_inserts.storage_1 + 10 | 10 | 10 | test_inserts.storage_1 + 11 | 11 | 11 | test_inserts.storage_2 + 12 | 12 | 12 | test_inserts.storage_2 + 13 | 13 | 13 | test_inserts.storage_2 + 14 | 14 | 14 | test_inserts.storage_2 + 15 | 15 | 15 | test_inserts.storage_2 + 16 | 16 | 16 | test_inserts.storage_2 + 17 | 17 | 17 | test_inserts.storage_2 + 18 | 18 | 18 | test_inserts.storage_2 + 19 | 19 | 19 | test_inserts.storage_2 + 20 | 20 | 20 | test_inserts.storage_2 + 21 | 21 | 21 | test_inserts.storage_3 + 22 | 22 | 22 | test_inserts.storage_3 + 23 | 23 | 23 | test_inserts.storage_3 + 24 | 24 | 24 | test_inserts.storage_3 + 25 | 25 | 25 | test_inserts.storage_3 + 26 | 26 | 26 | test_inserts.storage_3 + 27 | 27 | 27 | test_inserts.storage_3 + 28 | 28 | 28 | test_inserts.storage_3 + 29 | 29 | 29 | test_inserts.storage_3 + 30 | 30 | 30 | test_inserts.storage_3 + 31 | 31 | 31 | test_inserts.storage_4 + 32 | 32 | 32 | test_inserts.storage_4 + 33 | 33 | 33 | test_inserts.storage_4 + 34 | 34 | 34 | test_inserts.storage_4 + 35 | 35 | 35 | test_inserts.storage_4 + 36 | 36 | 36 | test_inserts.storage_4 + 37 | 37 | 37 | test_inserts.storage_4 + 38 | 38 | 38 | test_inserts.storage_4 + 39 | 39 | 39 | test_inserts.storage_4 + 40 | 40 | 40 | test_inserts.storage_4 + 41 | 41 | 41 | test_inserts.storage_5 + 42 | 42 | 42 | test_inserts.storage_5 + 43 | 43 | 43 | test_inserts.storage_5 + 44 | 44 | 44 | 
test_inserts.storage_5 + 45 | 45 | 45 | test_inserts.storage_5 + 46 | 46 | 46 | test_inserts.storage_5 + 47 | 47 | 47 | test_inserts.storage_5 + 48 | 48 | 48 | test_inserts.storage_5 + 49 | 49 | 49 | test_inserts.storage_5 + 50 | 50 | 50 | test_inserts.storage_5 + 51 | 51 | 51 | test_inserts.storage_6 + 52 | 52 | 52 | test_inserts.storage_6 + 53 | 53 | 53 | test_inserts.storage_6 + 54 | 54 | 54 | test_inserts.storage_6 + 55 | 55 | 55 | test_inserts.storage_6 + 56 | 56 | 56 | test_inserts.storage_6 + 57 | 57 | 57 | test_inserts.storage_6 + 58 | 58 | 58 | test_inserts.storage_6 + 59 | 59 | 59 | test_inserts.storage_6 + 60 | 60 | 60 | test_inserts.storage_6 + 61 | 61 | 61 | test_inserts.storage_7 + 62 | 62 | 62 | test_inserts.storage_7 + 63 | 63 | 63 | test_inserts.storage_7 + 64 | 64 | 64 | test_inserts.storage_7 + 65 | 65 | 65 | test_inserts.storage_7 + 66 | 66 | 66 | test_inserts.storage_7 + 67 | 67 | 67 | test_inserts.storage_7 + 68 | 68 | 68 | test_inserts.storage_7 + 69 | 69 | 69 | test_inserts.storage_7 + 70 | 70 | 70 | test_inserts.storage_7 + 71 | 71 | 71 | test_inserts.storage_8 + 72 | 72 | 72 | test_inserts.storage_8 + 73 | 73 | 73 | test_inserts.storage_8 + 74 | 74 | 74 | test_inserts.storage_8 + 75 | 75 | 75 | test_inserts.storage_8 + 76 | 76 | 76 | test_inserts.storage_8 + 77 | 77 | 77 | test_inserts.storage_8 + 78 | 78 | 78 | test_inserts.storage_8 + 79 | 79 | 79 | test_inserts.storage_8 + 80 | 80 | 80 | test_inserts.storage_8 + 81 | 81 | 81 | test_inserts.storage_9 + 82 | 82 | 82 | test_inserts.storage_9 + 83 | 83 | 83 | test_inserts.storage_9 + 84 | 84 | 84 | test_inserts.storage_9 + 85 | 85 | 85 | test_inserts.storage_9 + 86 | 86 | 86 | test_inserts.storage_9 + 87 | 87 | 87 | test_inserts.storage_9 + 88 | 88 | 88 | test_inserts.storage_9 + 89 | 89 | 89 | test_inserts.storage_9 + 90 | 90 | 90 | test_inserts.storage_9 + 91 | 91 | 91 | test_inserts.storage_10 + 92 | 92 | 92 | test_inserts.storage_10 + 93 | 93 | 93 | test_inserts.storage_10 + 94 | 94 | 
94 | test_inserts.storage_10 + 95 | 95 | 95 | test_inserts.storage_10 + 96 | 96 | 96 | test_inserts.storage_10 + 97 | 97 | 97 | test_inserts.storage_10 + 98 | 98 | 98 | test_inserts.storage_10 + 99 | 99 | 99 | test_inserts.storage_10 + 100 | 100 | 100 | test_inserts.storage_10 + 101 | 101 | 101 | test_inserts.storage_12 + 102 | 102 | 102 | test_inserts.storage_12 + 103 | 103 | 103 | test_inserts.storage_12 + 104 | 104 | 104 | test_inserts.storage_12 + 105 | 105 | 105 | test_inserts.storage_12 + 106 | 106 | 106 | test_inserts.storage_12 + 107 | 107 | 107 | test_inserts.storage_12 + 108 | 108 | 108 | test_inserts.storage_12 + 109 | 109 | 109 | test_inserts.storage_12 + 110 | 110 | 110 | test_inserts.storage_12 + 111 | 111 | 111 | test_inserts.storage_13 + 112 | 112 | 112 | test_inserts.storage_13 + 113 | 113 | 113 | test_inserts.storage_13 + 114 | 114 | 114 | test_inserts.storage_13 + 115 | 115 | 115 | test_inserts.storage_13 + 116 | 116 | 116 | test_inserts.storage_13 + 117 | 117 | 117 | test_inserts.storage_13 + 118 | 118 | 118 | test_inserts.storage_13 + 119 | 119 | 119 | test_inserts.storage_13 + 120 | 120 | 120 | test_inserts.storage_13 +(123 rows) + +/* drop data */ +TRUNCATE test_inserts.storage; +/* now test RETURNING list using our new column 'e' */ +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(-2, 130, 5) i +RETURNING e * 2, b, tableoid::regclass; +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. 
INSERTED ROW: (113,113,113) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: BEFORE INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_11 HAS EXPIRED. INSERTED ROW: (-2,-2,-2) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (3,3,3) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_1 HAS EXPIRED. INSERTED ROW: (8,8,8) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (103,103,103) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_12 HAS EXPIRED. INSERTED ROW: (108,108,108) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (113,113,113) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_13 HAS EXPIRED. INSERTED ROW: (118,118,118) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (123,123,123) +NOTICE: AFTER INSERTION TRIGGER ON TABLE storage_14 HAS EXPIRED. INSERTED ROW: (128,128,128) + ?column? 
| b | tableoid +----------+-----+------------------------- + -4 | -2 | test_inserts.storage_11 + 6 | 3 | test_inserts.storage_1 + 16 | 8 | test_inserts.storage_1 + 26 | 13 | test_inserts.storage_2 + 36 | 18 | test_inserts.storage_2 + 46 | 23 | test_inserts.storage_3 + 56 | 28 | test_inserts.storage_3 + 66 | 33 | test_inserts.storage_4 + 76 | 38 | test_inserts.storage_4 + 86 | 43 | test_inserts.storage_5 + 96 | 48 | test_inserts.storage_5 + 106 | 53 | test_inserts.storage_6 + 116 | 58 | test_inserts.storage_6 + 126 | 63 | test_inserts.storage_7 + 136 | 68 | test_inserts.storage_7 + 146 | 73 | test_inserts.storage_8 + 156 | 78 | test_inserts.storage_8 + 166 | 83 | test_inserts.storage_9 + 176 | 88 | test_inserts.storage_9 + 186 | 93 | test_inserts.storage_10 + 196 | 98 | test_inserts.storage_10 + 206 | 103 | test_inserts.storage_12 + 216 | 108 | test_inserts.storage_12 + 226 | 113 | test_inserts.storage_13 + 236 | 118 | test_inserts.storage_13 + 246 | 123 | test_inserts.storage_14 + 256 | 128 | test_inserts.storage_14 +(27 rows) + +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on test_inserts.storage + Output: (storage.e * 2), storage.b, (storage.tableoid)::regclass + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan 
(PartitionFilter) + Output: NULL::integer, NULL::integer, NULL::integer, storage.d, storage.e + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, NULL::integer, NULL::integer, i.i, i.i + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Function Scan on pg_catalog.generate_series i + Output: NULL::integer, i.i, NULL::integer, NULL::text, NULL::bigint + Function Call: generate_series(1, 10) +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + QUERY PLAN +------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, storage.e + -> Result + Output: NULL::integer, storage_1.b, NULL::integer, storage_1.d, storage_1.e + -> Append + -> Seq Scan on test_inserts.storage_11 storage_2 + Output: storage_2.b, storage_2.d, storage_2.e + -> Seq Scan on test_inserts.storage_1 storage_3 + Output: storage_3.b, storage_3.d, storage_3.e + -> Seq Scan on test_inserts.storage_2 storage_4 + Output: storage_4.b, storage_4.d, storage_4.e + -> Seq Scan on test_inserts.storage_3 storage_5 + Output: storage_5.b, storage_5.d, storage_5.e + -> Seq Scan on test_inserts.storage_4 storage_6 + Output: storage_6.b, storage_6.d, storage_6.e + -> Seq Scan on test_inserts.storage_5 storage_7 + Output: storage_7.b, storage_7.d, storage_7.e + -> Seq Scan on test_inserts.storage_6 storage_8 + Output: storage_8.b, storage_8.d, storage_8.e + -> Seq Scan on test_inserts.storage_7 storage_9 + Output: 
storage_9.b, storage_9.d, storage_9.e + -> Seq Scan on test_inserts.storage_8 storage_10 + Output: storage_10.b, storage_10.d, storage_10.e + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b, storage_11.d, storage_11.e + -> Seq Scan on test_inserts.storage_10 storage_12 + Output: storage_12.b, storage_12.d, storage_12.e + -> Seq Scan on test_inserts.storage_12 storage_13 + Output: storage_13.b, storage_13.d, storage_13.e + -> Seq Scan on test_inserts.storage_13 storage_14 + Output: storage_14.b, storage_14.d, storage_14.e + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b, storage_15.d, storage_15.e +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, storage.d, NULL::bigint + -> Result + Output: NULL::integer, storage_1.b, NULL::integer, storage_1.d, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage_2 + Output: storage_2.b, storage_2.d + -> Seq Scan on test_inserts.storage_1 storage_3 + Output: storage_3.b, storage_3.d + -> Seq Scan on test_inserts.storage_2 storage_4 + Output: storage_4.b, storage_4.d + -> Seq Scan on test_inserts.storage_3 storage_5 + Output: storage_5.b, storage_5.d + -> Seq Scan on test_inserts.storage_4 storage_6 + Output: storage_6.b, storage_6.d + -> Seq Scan on test_inserts.storage_5 storage_7 + Output: storage_7.b, storage_7.d + -> Seq Scan on test_inserts.storage_6 storage_8 + Output: storage_8.b, storage_8.d + -> Seq Scan on test_inserts.storage_7 storage_9 + Output: storage_9.b, storage_9.d + -> Seq Scan on test_inserts.storage_8 storage_10 + Output: storage_10.b, storage_10.d + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b, storage_11.d + -> Seq 
Scan on test_inserts.storage_10 storage_12 + Output: storage_12.b, storage_12.d + -> Seq Scan on test_inserts.storage_12 storage_13 + Output: storage_13.b, storage_13.d + -> Seq Scan on test_inserts.storage_13 storage_14 + Output: storage_14.b, storage_14.d + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b, storage_15.d +(34 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + QUERY PLAN +------------------------------------------------------------------------------------------- + Insert on test_inserts.storage + -> Custom Scan (PartitionFilter) + Output: NULL::integer, storage.b, NULL::integer, NULL::text, NULL::bigint + -> Result + Output: NULL::integer, storage_1.b, NULL::integer, NULL::text, NULL::bigint + -> Append + -> Seq Scan on test_inserts.storage_11 storage_2 + Output: storage_2.b + -> Seq Scan on test_inserts.storage_1 storage_3 + Output: storage_3.b + -> Seq Scan on test_inserts.storage_2 storage_4 + Output: storage_4.b + -> Seq Scan on test_inserts.storage_3 storage_5 + Output: storage_5.b + -> Seq Scan on test_inserts.storage_4 storage_6 + Output: storage_6.b + -> Seq Scan on test_inserts.storage_5 storage_7 + Output: storage_7.b + -> Seq Scan on test_inserts.storage_6 storage_8 + Output: storage_8.b + -> Seq Scan on test_inserts.storage_7 storage_9 + Output: storage_9.b + -> Seq Scan on test_inserts.storage_8 storage_10 + Output: storage_10.b + -> Seq Scan on test_inserts.storage_9 storage_11 + Output: storage_11.b + -> Seq Scan on test_inserts.storage_10 storage_12 + Output: storage_12.b + -> Seq Scan on test_inserts.storage_12 storage_13 + Output: storage_13.b + -> Seq Scan on test_inserts.storage_13 storage_14 + Output: storage_14.b + -> Seq Scan on test_inserts.storage_14 storage_15 + Output: storage_15.b +(34 rows) + +/* test gap case (missing partition in between) */ +CREATE TABLE test_inserts.test_gap(val INT NOT NULL); +INSERT INTO test_inserts.test_gap 
SELECT generate_series(1, 30); +SELECT create_range_partitions('test_inserts.test_gap', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +DROP TABLE test_inserts.test_gap_2; /* make a gap */ +INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ +ERROR: cannot spawn a partition +DROP TABLE test_inserts.test_gap CASCADE; +NOTICE: drop cascades to 3 other objects +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_1; +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; + count +------- + 0 +(1 row) + +DROP TABLE test_inserts.special_2; +DROP TABLE test_inserts.test_special_only CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test_inserts.storage CASCADE; +NOTICE: drop cascades to 15 other objects +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_interval.out b/expected/pathman_interval.out index 1bcd8216..e4741522 100644 --- a/expected/pathman_interval.out +++ b/expected/pathman_interval.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_interval; /* Range partitions for INT2 type */ @@ -270,5 +271,5 @@ SELECT set_interval('test_interval.abc', NULL::INTEGER); ERROR: table "test_interval.abc" is not partitioned by RANGE DROP TABLE test_interval.abc CASCADE; NOTICE: drop cascades to 3 other objects -DROP SCHEMA test_interval CASCADE; +DROP SCHEMA test_interval; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_join_clause.out b/expected/pathman_join_clause.out index 7d9acdea..7654d4ca 100644 --- a/expected/pathman_join_clause.out +++ b/expected/pathman_join_clause.out @@ -1,4 +1,9 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_join_clause_1.out is the updated version. 
+ */ \set VERBOSITY terse +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; @@ -166,7 +171,13 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); 4 | 3 | | (2 rows) -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 15 other objects +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_1.out b/expected/pathman_join_clause_1.out new file mode 100644 index 00000000..d65131c7 --- /dev/null +++ b/expected/pathman_join_clause_1.out @@ -0,0 +1,182 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_join_clause_1.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: 
((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(41 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT 
* FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child_1.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP 
TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_2.out b/expected/pathman_join_clause_2.out new file mode 100644 index 00000000..df2ea0a5 --- /dev/null +++ b/expected/pathman_join_clause_2.out @@ -0,0 +1,161 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_join_clause_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Seq Scan on mytbl_0 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT 
(key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 
test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; 
+DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_3.out b/expected/pathman_join_clause_3.out new file mode 100644 index 00000000..80b8de4c --- /dev/null +++ b/expected/pathman_join_clause_3.out @@ -0,0 +1,182 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (fk.id1 = m.id1) + -> Bitmap Heap Scan on mytbl_0 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_0_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_1 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key 
<@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_1_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_2 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_2_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_3 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_3_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_4 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_4_pkey + Index Cond: (id1 = fk.id1) + -> Bitmap Heap Scan on mytbl_5 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_5_pkey + Index Cond: (id1 = fk.id1) + -> Seq Scan on mytbl_6 m + Filter: ((fk.id1 = id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Heap Scan on mytbl_7 m + Recheck Cond: (id1 = fk.id1) + Filter: ((fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Bitmap Index Scan on mytbl_7_pkey + Index Cond: (id1 = fk.id1) +(41 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES 
(1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + 
test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_4.out b/expected/pathman_join_clause_4.out new file mode 100644 index 00000000..17791fb9 --- /dev/null +++ b/expected/pathman_join_clause_4.out @@ -0,0 +1,161 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN 
+------------------------------------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (m.id1 = fk.id1) + -> Seq Scan on mytbl_0 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 
2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Custom Scan (RuntimeAppend) + Prune by: ((child.owner_id = 3) AND (child.owner_id = parent.owner_id)) + -> Seq Scan on child_1 child + Filter: ((owner_id = 3) AND (owner_id = parent.owner_id) AND (parent_id = parent.id)) +(7 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | 
owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_join_clause_5.out b/expected/pathman_join_clause_5.out new file mode 100644 index 00000000..179f50f7 --- /dev/null +++ b/expected/pathman_join_clause_5.out @@ -0,0 +1,160 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test push down a join clause into child nodes of append + */ +/* create test tables */ +CREATE TABLE test.fk ( + id1 INT NOT NULL, + id2 INT NOT NULL, + start_key INT, + end_key INT, + PRIMARY KEY (id1, id2)); +CREATE TABLE test.mytbl ( + id1 INT NOT NULL, + id2 INT NOT NULL, + key INT NOT NULL, + CONSTRAINT fk_fk FOREIGN KEY (id1, id2) REFERENCES test.fk(id1, id2), + PRIMARY KEY (id1, key)); +SELECT pathman.create_hash_partitions('test.mytbl', 'id1', 8); + create_hash_partitions +------------------------ + 8 +(1 row) + +/* ...fill out with test data */ +INSERT INTO test.fk VALUES (1, 1); +INSERT INTO test.mytbl VALUES (1, 1, 5), (1, 1, 6); +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* run test queries */ +EXPLAIN (COSTS OFF) /* test plan */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Nested Loop 
+ -> Seq Scan on fk + -> Custom Scan (RuntimeAppend) + Prune by: (m.id1 = fk.id1) + -> Seq Scan on mytbl_0 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_1 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_2 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_3 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_4 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_5 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_6 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) + -> Seq Scan on mytbl_7 m + Filter: ((id1 = fk.id1) AND (fk.id2 = id2) AND (NOT (key <@ int4range(6, fk.end_key)))) +(20 rows) + +/* test joint data */ +SELECT m.tableoid::regclass, id1, id2, key, start_key, end_key +FROM test.mytbl m JOIN test.fk USING(id1, id2) +WHERE NOT key <@ int4range(6, end_key); + tableoid | id1 | id2 | key | start_key | end_key +--------------+-----+-----+-----+-----------+--------- + test.mytbl_6 | 1 | 1 | 5 | | +(1 row) + +/* + * Test case by @dimarick + */ +CREATE TABLE test.parent ( + id SERIAL NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +CREATE TABLE test.child_nopart ( + parent_id INTEGER NOT NULL, + owner_id INTEGER NOT NULL +); +INSERT INTO test.parent (owner_id) VALUES (1), (2), (3), (3); +INSERT INTO test.child (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +INSERT INTO test.child_nopart (parent_id, owner_id) VALUES (1, 1), (2, 2), (3, 3), (5, 3); +SELECT pathman.create_hash_partitions('test.child', 'owner_id', 2); + create_hash_partitions 
+------------------------ + 2 +(1 row) + +/* gather statistics on test tables to have deterministic plans */ +ANALYZE; +/* Query #1 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = test.parent.owner_id +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +/* Query #2 */ +EXPLAIN (COSTS OFF) SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (child.parent_id = parent.id) + -> Seq Scan on parent + Filter: ((id = ANY ('{3,4}'::integer[])) AND (owner_id = 3)) + -> Seq Scan on child_1 child + Filter: (owner_id = 3) +(6 rows) + +SELECT * FROM test.parent +LEFT JOIN test.child ON test.child.parent_id = test.parent.id AND + test.child.owner_id = 3 +WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); + id | owner_id | parent_id | owner_id +----+----------+-----------+---------- + 3 | 3 | 3 | 3 + 4 | 3 | | +(2 rows) + +DROP TABLE test.child CASCADE; +NOTICE: drop cascades to 2 other objects +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +NOTICE: drop cascades to 8 other objects 
+DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_lateral.out b/expected/pathman_lateral.out index 808a4d64..53edc3d2 100644 --- a/expected/pathman_lateral.out +++ b/expected/pathman_lateral.out @@ -1,3 +1,10 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -27,20 +34,22 @@ select * from where t1.id = t2.id and t.id = t3.id); QUERY PLAN -------------------------------------------------------------------------------------------- - Nested Loop Semi Join + Nested Loop -> Nested Loop - Join Filter: ((t2.id + t1.id) = t3.id) - -> Append - -> Seq Scan on data_0 t3 - -> Seq Scan on data_1 t3_1 - -> Seq Scan on data_2 t3_2 - -> Seq Scan on data_3 t3_3 - -> Seq Scan on data_4 t3_4 - -> Seq Scan on data_5 t3_5 - -> Seq Scan on data_6 t3_6 - -> Seq Scan on data_7 t3_7 - -> Seq Scan on data_8 t3_8 - -> Seq Scan on data_9 t3_9 + Join Filter: ((t2.id + t1.id) = t.id) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t + -> Seq Scan on data_1 t_1 + -> Seq Scan on data_2 t_2 + -> Seq Scan on data_3 t_3 + -> Seq Scan on data_4 t_4 + -> Seq Scan on data_5 t_5 + -> Seq Scan on data_6 t_6 + -> Seq Scan on data_7 t_7 + -> Seq Scan on data_8 t_8 + -> Seq Scan on data_9 t_9 -> Materialize -> Nested Loop Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) @@ -88,31 +97,32 @@ select * from -> Seq Scan on data_9 t1_9 Filter: ((id >= 1) AND (id <= 100)) -> Custom Scan (RuntimeAppend) - Prune by: (t3.id = t.id) - -> Seq Scan on data_0 t - Filter: (t3.id = id) - -> Seq Scan on data_1 t - Filter: (t3.id = id) - -> Seq 
Scan on data_2 t - Filter: (t3.id = id) - -> Seq Scan on data_3 t - Filter: (t3.id = id) - -> Seq Scan on data_4 t - Filter: (t3.id = id) - -> Seq Scan on data_5 t - Filter: (t3.id = id) - -> Seq Scan on data_6 t - Filter: (t3.id = id) - -> Seq Scan on data_7 t - Filter: (t3.id = id) - -> Seq Scan on data_8 t - Filter: (t3.id = id) - -> Seq Scan on data_9 t - Filter: (t3.id = id) -(82 rows) + Prune by: (t.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(84 rows) set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_1.out b/expected/pathman_lateral_1.out new file mode 100644 index 00000000..12995290 --- /dev/null +++ b/expected/pathman_lateral_1.out @@ -0,0 +1,122 @@ +-- Sometimes join selectivity improvements patches in pgpro force nested loop +-- members swap -- in pathman_lateral_1.out +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to 
Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 
t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t + -> Seq Scan on data_1 t_1 + -> Seq Scan on data_2 t_2 + -> Seq Scan on data_3 t_3 + -> Seq Scan on data_4 t_4 + -> Seq Scan on data_5 t_5 + -> Seq Scan on data_6 t_6 + -> Seq Scan on data_7 t_7 + -> Seq Scan on data_8 t_8 + -> Seq Scan on data_9 t_9 + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(83 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_2.out b/expected/pathman_lateral_2.out new file mode 100644 index 00000000..e4a64a56 --- /dev/null +++ b/expected/pathman_lateral_2.out @@ -0,0 +1,128 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Materialize + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> 
Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(84 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_3.out b/expected/pathman_lateral_3.out new file mode 100644 index 00000000..4bc385de --- /dev/null +++ b/expected/pathman_lateral_3.out @@ -0,0 +1,127 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + 
* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2_1.id + t1_1.id) = t_1.id) + -> Nested Loop + Join Filter: ((t2_1.id > t1_1.id) AND (t1_1.id > t2_1.id) AND (t1_1.id = t2_1.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 
299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> HashAggregate + Group Key: t_1.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Custom Scan (RuntimeAppend) + Prune by: (t_1.id = t3.id) + -> Seq Scan on data_0 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_1 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_2 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_3 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_4 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_5 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_6 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_7 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_8 t3 + Filter: (t_1.id = id) + -> Seq Scan on data_9 t3 + Filter: (t_1.id = id) +(83 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_lateral_4.out b/expected/pathman_lateral_4.out new file mode 100644 index 00000000..d35da608 --- /dev/null +++ b/expected/pathman_lateral_4.out @@ -0,0 
+1,128 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_lateral; +/* create table partitioned by HASH */ +create table test_lateral.data(id int8 not null); +select create_hash_partitions('test_lateral.data', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into test_lateral.data select generate_series(1, 10000); +VACUUM ANALYZE; +set enable_hashjoin = off; +set enable_mergejoin = off; +/* all credits go to Ivan Frolkov */ +explain (costs off) +select * from + test_lateral.data as t1, + lateral(select * from test_lateral.data as t2 where t2.id > t1.id) t2, + lateral(select * from test_lateral.data as t3 where t3.id = t2.id + t1.id) t3 + where t1.id between 1 and 100 and + t2.id between 2 and 299 and + t1.id > t2.id and + exists(select * from test_lateral.data t + where t1.id = t2.id and t.id = t3.id); + QUERY PLAN +-------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((t2.id + t1.id) = t.id) + -> HashAggregate + Group Key: t.id + -> Append + -> Seq Scan on data_0 t_1 + -> Seq Scan on data_1 t_2 + -> Seq Scan on data_2 t_3 + -> Seq Scan on data_3 t_4 + -> Seq Scan on data_4 t_5 + -> Seq Scan on data_5 t_6 + -> Seq Scan on data_6 t_7 + -> Seq Scan on data_7 t_8 + -> Seq Scan on data_8 t_9 + -> Seq Scan on data_9 t_10 + -> Materialize + -> Nested Loop + Join Filter: ((t2.id > t1.id) AND (t1.id > t2.id) AND (t1.id = t2.id)) + -> Append + -> Seq Scan on data_0 t2_1 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_1 t2_2 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_2 t2_3 + Filter: ((id 
>= 2) AND (id <= 299)) + -> Seq Scan on data_3 t2_4 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_4 t2_5 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_5 t2_6 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_6 t2_7 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_7 t2_8 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_8 t2_9 + Filter: ((id >= 2) AND (id <= 299)) + -> Seq Scan on data_9 t2_10 + Filter: ((id >= 2) AND (id <= 299)) + -> Materialize + -> Append + -> Seq Scan on data_0 t1_1 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_1 t1_2 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_2 t1_3 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_3 t1_4 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_4 t1_5 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_5 t1_6 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_6 t1_7 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_7 t1_8 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_8 t1_9 + Filter: ((id >= 1) AND (id <= 100)) + -> Seq Scan on data_9 t1_10 + Filter: ((id >= 1) AND (id <= 100)) + -> Custom Scan (RuntimeAppend) + Prune by: (t3.id = t.id) + -> Seq Scan on data_0 t3 + Filter: (t.id = id) + -> Seq Scan on data_1 t3 + Filter: (t.id = id) + -> Seq Scan on data_2 t3 + Filter: (t.id = id) + -> Seq Scan on data_3 t3 + Filter: (t.id = id) + -> Seq Scan on data_4 t3 + Filter: (t.id = id) + -> Seq Scan on data_5 t3 + Filter: (t.id = id) + -> Seq Scan on data_6 t3 + Filter: (t.id = id) + -> Seq Scan on data_7 t3 + Filter: (t.id = id) + -> Seq Scan on data_8 t3 + Filter: (t.id = id) + -> Seq Scan on data_9 t3 + Filter: (t.id = id) +(84 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test_lateral.data CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA test_lateral; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_mergejoin.out 
b/expected/pathman_mergejoin.out index ff2ae5bb..d8a14371 100644 --- a/expected/pathman_mergejoin.out +++ b/expected/pathman_mergejoin.out @@ -1,3 +1,15 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; @@ -35,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -68,7 +82,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_1.out b/expected/pathman_mergejoin_1.out index de87f09b..bcd6c272 100644 --- a/expected/pathman_mergejoin_1.out +++ b/expected/pathman_mergejoin_1.out @@ -1,3 +1,15 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 
8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ \set VERBOSITY terse SET search_path = 'public'; CREATE SCHEMA pathman; @@ -35,6 +47,8 @@ SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -66,7 +80,11 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 12 other objects +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_2.out b/expected/pathman_mergejoin_2.out new file mode 100644 index 00000000..aed697d2 --- /dev/null +++ b/expected/pathman_mergejoin_2.out @@ -0,0 +1,83 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using 
num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(13 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_3.out b/expected/pathman_mergejoin_3.out new file mode 100644 index 00000000..85414544 --- /dev/null +++ b/expected/pathman_mergejoin_3.out @@ -0,0 +1,81 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_1 + -> 
Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_2 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_3 +(11 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_4.out b/expected/pathman_mergejoin_4.out new file mode 100644 index 00000000..fc9bc95f --- /dev/null +++ b/expected/pathman_mergejoin_4.out @@ -0,0 +1,90 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + 
create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Merge Join + Merge Cond: (j1.id = j2.id) + -> Merge Append + Sort Key: j1.id + -> Index Scan using range_rel_1_pkey on range_rel_1 j1_1 + -> Index Scan using range_rel_2_pkey on range_rel_2 j1_2 + -> Merge Append + Sort Key: j2.id + -> Index Scan using range_rel_2_pkey on range_rel_2 j2_1 + -> Index Scan using range_rel_3_pkey on range_rel_3 j2_2 + -> Index Scan using range_rel_4_pkey on range_rel_4 j2_3 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(20 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_5.out b/expected/pathman_mergejoin_5.out new file mode 100644 index 00000000..b99e40db --- /dev/null +++ b/expected/pathman_mergejoin_5.out @@ -0,0 +1,81 @@ +/* + * 
pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN 
+--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + Index Cond: (id IS NOT NULL) + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(11 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_mergejoin_6.out b/expected/pathman_mergejoin_6.out new file mode 100644 index 00000000..0cca2aef --- /dev/null +++ b/expected/pathman_mergejoin_6.out @@ -0,0 +1,80 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + create_range_partitions +------------------------- + 4 +(1 row) + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +INSERT INTO test.num_range_rel SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); + create_range_partitions +------------------------- + 4 +(1 row) + +/* + * Merge join between 3 partitioned tables + * + * test case for the fix of sorting, merge append and index scan issues + * details in commit 54dd0486fc55b2d25cf7d095f83dee6ff4adee06 + */ +SET enable_hashjoin = OFF; +SET enable_nestloop = OFF; +SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: j2.dt + -> Merge Join + Merge Cond: (j2.id = j3.id) + -> Index Scan using range_rel_2_pkey on range_rel_2 j2 + -> Append + -> Index Scan using num_range_rel_1_pkey on num_range_rel_1 j3_1 + -> Index Scan using num_range_rel_2_pkey on num_range_rel_2 j3_2 + -> Index Scan using 
num_range_rel_3_pkey on num_range_rel_3 j3_3 + -> Index Scan using num_range_rel_4_pkey on num_range_rel_4 j3_4 +(10 rows) + +SET enable_hashjoin = ON; +SET enable_nestloop = ON; +SET enable_seqscan = ON; +DROP TABLE test.num_range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 5 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman; +DROP SCHEMA pathman; diff --git a/expected/pathman_only.out b/expected/pathman_only.out index f90dc56e..f44f2256 100644 --- a/expected/pathman_only.out +++ b/expected/pathman_only.out @@ -2,6 +2,32 @@ * --------------------------------------------- * NOTE: This test behaves differenly on PgPro * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
*/ \set VERBOSITY terse SET search_path = 'public'; @@ -137,7 +163,34 @@ UNION SELECT * FROM test_only.from_only_test; EXPLAIN (COSTS OFF) SELECT * FROM test_only.from_only_test a JOIN ONLY test_only.from_only_test b USING(val); -ERROR: it is prohibited to apply ONLY modifier to partitioned tables which have already been mentioned without ONLY + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + /* should be OK */ EXPLAIN (COSTS OFF) WITH q1 AS (SELECT * FROM test_only.from_only_test), @@ -237,6 +290,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test Filter: (val = $0) (27 rows) -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_1.out b/expected/pathman_only_1.out index 77fc0dc5..ce6fd127 100644 --- a/expected/pathman_only_1.out +++ b/expected/pathman_only_1.out @@ -2,6 +2,32 @@ * --------------------------------------------- * NOTE: This test behaves differenly on PgPro * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs 
which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ \set VERBOSITY terse SET search_path = 'public'; @@ -11,7 +37,6 @@ CREATE SCHEMA test_only; CREATE TABLE test_only.from_only_test(val INT NOT NULL); INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); -NOTICE: sequence "from_only_test_seq" does not exist, skipping create_range_partitions ------------------------- 10 @@ -143,68 +168,93 @@ JOIN ONLY test_only.from_only_test b USING(val); Nested Loop -> Seq Scan on from_only_test b -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) -> 
Seq Scan on from_only_test_10 a -(13 rows) + Filter: (b.val = val) +(24 rows) /* should be OK */ EXPLAIN (COSTS OFF) WITH q1 AS (SELECT * FROM test_only.from_only_test), q2 AS (SELECT * FROM ONLY test_only.from_only_test) SELECT * FROM q1 JOIN q2 USING(val); - QUERY PLAN ---------------------------------------------- - Hash Join - Hash Cond: (q1.val = q2.val) - CTE q1 - -> Append - -> Seq Scan on from_only_test_1 - -> Seq Scan on from_only_test_2 - -> Seq Scan on from_only_test_3 - -> Seq Scan on from_only_test_4 - -> Seq Scan on from_only_test_5 - -> Seq Scan on from_only_test_6 - -> Seq Scan on from_only_test_7 - -> Seq Scan on from_only_test_8 - -> Seq Scan on from_only_test_9 - -> Seq Scan on from_only_test_10 - CTE q2 - -> Seq Scan on from_only_test - -> CTE Scan on q1 - -> Hash - -> CTE Scan on q2 -(19 rows) + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) /* should be OK */ EXPLAIN (COSTS OFF) WITH q1 AS 
(SELECT * FROM ONLY test_only.from_only_test) SELECT * FROM test_only.from_only_test JOIN q1 USING(val); - QUERY PLAN ----------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------- Nested Loop - CTE q1 - -> Seq Scan on from_only_test from_only_test_1 - -> CTE Scan on q1 + -> Seq Scan on from_only_test from_only_test_1 -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) -> Seq Scan on from_only_test_10 from_only_test -(15 rows) + Filter: (from_only_test_1.val = val) +(24 rows) /* should be OK */ EXPLAIN (COSTS OFF) @@ -215,6 +265,7 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test QUERY PLAN ----------------------------------------------------------------- Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) InitPlan 1 (returns $0) -> Limit -> Sort @@ -240,8 +291,9 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test Filter: (val = $0) -> Seq Scan on from_only_test_10 from_only_test Filter: (val = $0) -(26 rows) +(27 rows) -DROP SCHEMA test_only CASCADE; -NOTICE: drop cascades to 12 other objects +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades 
to 11 other objects +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_2.out b/expected/pathman_only_2.out new file mode 100644 index 00000000..6aeadb76 --- /dev/null +++ b/expected/pathman_only_2.out @@ -0,0 +1,299 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 
rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 
+ -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (b.val = a.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test 
from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test_1.val = from_only_test.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = 
val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_3.out b/expected/pathman_only_3.out new file mode 100644 index 00000000..1999309d --- /dev/null +++ b/expected/pathman_only_3.out @@ -0,0 +1,299 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- 
+ * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan 
on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq 
Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (a.val = b.val) + -> 
Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY 
test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = $0) + InitPlan 1 (returns $0) + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_5 from_only_test + Filter: 
(val = $0) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = $0) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = $0) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_only_4.out b/expected/pathman_only_4.out new file mode 100644 index 00000000..fbcc397c --- /dev/null +++ b/expected/pathman_only_4.out @@ -0,0 +1,299 @@ +/* + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. + * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_only; +/* Test special case: ONLY statement with not-ONLY for partitioned table */ +CREATE TABLE test_only.from_only_test(val INT NOT NULL); +INSERT INTO test_only.from_only_test SELECT generate_series(1, 20); +SELECT create_range_partitions('test_only.from_only_test', 'val', 1, 2); + create_range_partitions +------------------------- + 10 +(1 row) + +VACUUM ANALYZE; +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 + -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 +(15 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +---------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Seq Scan on from_only_test from_only_test_11 +(15 
rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM ONLY test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Append + -> Seq Scan on from_only_test_1 + -> Seq Scan on from_only_test_2 + -> Seq Scan on from_only_test_3 + -> Seq Scan on from_only_test_4 + -> Seq Scan on from_only_test_5 + -> Seq Scan on from_only_test_6 + -> Seq Scan on from_only_test_7 + -> Seq Scan on from_only_test_8 + -> Seq Scan on from_only_test_9 + -> Seq Scan on from_only_test_10 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_12 + -> Seq Scan on from_only_test_2 from_only_test_13 + -> Seq Scan on from_only_test_3 from_only_test_14 + -> Seq Scan on from_only_test_4 from_only_test_15 + -> Seq Scan on from_only_test_5 from_only_test_16 + -> Seq Scan on from_only_test_6 from_only_test_17 + -> Seq Scan on from_only_test_7 from_only_test_18 + -> Seq Scan on from_only_test_8 from_only_test_19 + -> Seq Scan on from_only_test_9 from_only_test_20 + -> Seq Scan on from_only_test_10 from_only_test_21 + -> Seq Scan on from_only_test from_only_test_22 +(26 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test +UNION SELECT * FROM test_only.from_only_test; + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: from_only_test.val + -> Append + -> Seq Scan on from_only_test + -> Append + -> Seq Scan on from_only_test_1 from_only_test_2 + -> Seq Scan on from_only_test_2 from_only_test_3 + -> Seq Scan on from_only_test_3 from_only_test_4 + -> Seq Scan on from_only_test_4 from_only_test_5 + -> Seq Scan on from_only_test_5 from_only_test_6 + -> Seq Scan on from_only_test_6 from_only_test_7 + -> Seq Scan on from_only_test_7 from_only_test_8 
+ -> Seq Scan on from_only_test_8 from_only_test_9 + -> Seq Scan on from_only_test_9 from_only_test_10 + -> Seq Scan on from_only_test_10 from_only_test_11 + -> Append + -> Seq Scan on from_only_test_1 from_only_test_13 + -> Seq Scan on from_only_test_2 from_only_test_14 + -> Seq Scan on from_only_test_3 from_only_test_15 + -> Seq Scan on from_only_test_4 from_only_test_16 + -> Seq Scan on from_only_test_5 from_only_test_17 + -> Seq Scan on from_only_test_6 from_only_test_18 + -> Seq Scan on from_only_test_7 from_only_test_19 + -> Seq Scan on from_only_test_8 from_only_test_20 + -> Seq Scan on from_only_test_9 from_only_test_21 + -> Seq Scan on from_only_test_10 from_only_test_22 +(26 rows) + +/* not ok, ONLY|non-ONLY in one query (this is not the case for PgPro) */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test a +JOIN ONLY test_only.from_only_test b USING(val); + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test b + -> Custom Scan (RuntimeAppend) + Prune by: (a.val = b.val) + -> Seq Scan on from_only_test_1 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_2 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_3 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_4 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_5 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_6 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_7 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_8 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_9 a + Filter: (b.val = val) + -> Seq Scan on from_only_test_10 a + Filter: (b.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM test_only.from_only_test), + q2 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM q1 JOIN q2 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test 
from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +WITH q1 AS (SELECT * FROM ONLY test_only.from_only_test) +SELECT * FROM test_only.from_only_test JOIN q1 USING(val); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on from_only_test from_only_test_1 + -> Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = from_only_test_1.val) + -> Seq Scan on from_only_test_1 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (from_only_test_1.val = 
val) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (from_only_test_1.val = val) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (from_only_test_1.val = val) +(24 rows) + +/* should be OK */ +EXPLAIN (COSTS OFF) +SELECT * FROM test_only.from_only_test +WHERE val = (SELECT val FROM ONLY test_only.from_only_test + ORDER BY val ASC + LIMIT 1); + QUERY PLAN +----------------------------------------------------------------- + Custom Scan (RuntimeAppend) + Prune by: (from_only_test.val = (InitPlan 1).col1) + InitPlan 1 + -> Limit + -> Sort + Sort Key: from_only_test_1.val + -> Seq Scan on from_only_test from_only_test_1 + -> Seq Scan on from_only_test_1 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_2 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_3 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_4 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_5 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_6 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_7 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_8 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_9 from_only_test + Filter: (val = (InitPlan 1).col1) + -> Seq Scan on from_only_test_10 from_only_test + Filter: (val = (InitPlan 1).col1) +(27 rows) + +DROP TABLE test_only.from_only_test CASCADE; +NOTICE: drop cascades to 11 other objects +DROP SCHEMA test_only; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_param_upd_del.out b/expected/pathman_param_upd_del.out index 7419ad29..28fa616d 100644 --- a/expected/pathman_param_upd_del.out +++ b/expected/pathman_param_upd_del.out @@ -68,6 +68,64 @@ EXPLAIN (COSTS OFF) EXECUTE upd(11); 
Filter: (key = 11) (3 rows) +DEALLOCATE upd; +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = ($1 + 3) * 2; +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(5); + QUERY PLAN +---------------------------- + Update on test_7 + -> Seq Scan on test_7 + Filter: (key = 16) +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE upd(6); + QUERY PLAN +---------------------------- + Update on test_3 + -> Seq Scan on test_3 + Filter: (key = 18) +(3 rows) + DEALLOCATE upd; PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1; EXPLAIN (COSTS OFF) EXECUTE del(10); @@ -127,6 +185,7 @@ EXPLAIN (COSTS OFF) EXECUTE del(11); (3 rows) DEALLOCATE del; -DROP SCHEMA param_upd_del CASCADE; -NOTICE: drop cascades to 11 other objects +DROP TABLE param_upd_del.test CASCADE; +NOTICE: drop cascades to 10 other objects +DROP SCHEMA param_upd_del; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions.out b/expected/pathman_permissions.out index 4700f8bf..a29865d0 100644 --- a/expected/pathman_permissions.out +++ b/expected/pathman_permissions.out @@ -1,89 +1,108 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; -CREATE ROLE user1 LOGIN; -CREATE 
ROLE user2 LOGIN; -GRANT USAGE, CREATE ON SCHEMA permissions TO user1; -GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; /* Switch to #1 */ -SET ROLE user1; -CREATE TABLE permissions.user1_table(id serial, a int); -INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ -SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -ERROR: permission denied for relation user1_table -/* Grant SELECT to user2 */ -SET ROLE user1; -GRANT SELECT ON permissions.user1_table TO user2; +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; /* Should fail (don't own parent) */ -SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); -ERROR: only the owner or superuser can change partitioning configuration of table "user1_table" +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges /* Should be ok */ -SET ROLE user1; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +SET ROLE pathman_user1; +SELECT 
create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); create_range_partitions ------------------------- 2 (1 row) /* Should be able to see */ -SET ROLE user2; +SET ROLE pathman_user2; SELECT * FROM pathman_config; - partrel | expr | parttype | range_interval | cooked_expr --------------------------+------+----------+----------------+----------------------------------------------------------------------------------------------------------------------- - permissions.user1_table | id | 2 | 10 | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location 8} + partrel | expr | parttype | range_interval +---------------------------------+------+----------+---------------- + permissions.pathman_user1_table | id | 2 | 10 (1 row) SELECT * FROM pathman_config_params; - partrel | enable_parent | auto | init_callback | spawn_using_bgw --------------------------+---------------+------+---------------+----------------- - permissions.user1_table | f | t | | f + partrel | enable_parent | auto | init_callback | spawn_using_bgw +---------------------------------+---------------+------+---------------+----------------- + permissions.pathman_user1_table | f | t | | f (1 row) /* Should fail */ -SET ROLE user2; -SELECT set_enable_parent('permissions.user1_table', true); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" -SELECT set_auto('permissions.user1_table', false); -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +SELECT set_auto('permissions.pathman_user1_table', false); +WARNING: only the owner or superuser can change 
partitioning configuration of table "pathman_user1_table" ERROR: new row violates row-level security policy for table "pathman_config_params" /* Should fail */ -SET ROLE user2; +SET ROLE pathman_user2; DELETE FROM pathman_config -WHERE partrel = 'permissions.user1_table'::regclass; -WARNING: only the owner or superuser can change partitioning configuration of table "user1_table" +WHERE partrel = 'permissions.pathman_user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" /* No rights to insert, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); -ERROR: permission denied for relation user1_table +SET ROLE pathman_user2; +DO $$ +BEGIN + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges /* No rights to create partitions (need INSERT privilege) */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); -ERROR: permission denied for parent relation "user1_table" -/* Allow user2 to create partitions */ -SET ROLE user1; -GRANT INSERT ON permissions.user1_table TO user2; -GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); +ERROR: permission denied for parent relation "pathman_user1_table" +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ /* Should be able to prepend a partition */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); - prepend_range_partition ---------------------------- - permissions.user1_table_4 +SET ROLE pathman_user2; +SELECT 
prepend_range_partition('permissions.pathman_user1_table'); + prepend_range_partition +----------------------------------- + permissions.pathman_user1_table_4 (1 row) SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) ORDER BY attname; /* check ACL for each column */ - attname | attacl -----------+----------------- - a | {user2=w/user1} + attname | attacl +----------+--------------------------------- + a | {pathman_user2=w/pathman_user1} cmax | cmin | ctid | @@ -94,8 +113,8 @@ ORDER BY attname; /* check ACL for each column */ (8 rows) /* Have rights, should be ok (parent's ACL is shared by new children) */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; id | a ----+--- 35 | 0 @@ -103,70 +122,76 @@ INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM pg_class WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) -ORDER BY relname; /* we also check ACL for "user1_table_2" */ - relname | relacl ----------------+-------------------------------------- - user1_table_2 | {user1=arwdDxt/user1,user2=r/user1} - user1_table_5 | {user1=arwdDxt/user1,user2=ar/user1} - user1_table_6 | {user1=arwdDxt/user1,user2=ar/user1} +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ + relname | relacl +-----------------------+---------------------------------------------------------------------- + pathman_user1_table_2 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=r/pathman_user1} + 
pathman_user1_table_5 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=ar/pathman_user1} + pathman_user1_table_6 | {pathman_user1=arwdDxt/pathman_user1,pathman_user2=ar/pathman_user1} (3 rows) /* Try to drop partition, should fail */ -SELECT drop_range_partition('permissions.user1_table_4'); -ERROR: must be owner of relation user1_table_4 +DO $$ +BEGIN + SELECT drop_range_partition('permissions.pathman_user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges /* Disable automatic partition creation */ -SET ROLE user1; -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); set_auto ---------- (1 row) /* Partition creation, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; ERROR: no suitable partition for key '55' /* Finally drop partitions */ -SET ROLE user1; -SELECT drop_partitions('permissions.user1_table'); -NOTICE: 10 rows copied from permissions.user1_table_1 -NOTICE: 10 rows copied from permissions.user1_table_2 -NOTICE: 0 rows copied from permissions.user1_table_4 -NOTICE: 0 rows copied from permissions.user1_table_5 -NOTICE: 1 rows copied from permissions.user1_table_6 +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); +NOTICE: 10 rows copied from permissions.pathman_user1_table_1 +NOTICE: 10 rows copied from permissions.pathman_user1_table_2 +NOTICE: 0 rows copied from permissions.pathman_user1_table_4 +NOTICE: 0 rows copied from permissions.pathman_user1_table_5 +NOTICE: 1 rows copied from permissions.pathman_user1_table_6 drop_partitions ----------------- 5 (1 row) /* Switch to #2 */ -SET ROLE user2; +SET ROLE pathman_user2; /* Test ddl event trigger */ -CREATE TABLE permissions.user2_table(id 
serial); -SELECT create_hash_partitions('permissions.user2_table', 'id', 3); +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); create_hash_partitions ------------------------ 3 (1 row) -INSERT INTO permissions.user2_table SELECT generate_series(1, 30); -SELECT drop_partitions('permissions.user2_table'); -NOTICE: 9 rows copied from permissions.user2_table_0 -NOTICE: 11 rows copied from permissions.user2_table_1 -NOTICE: 10 rows copied from permissions.user2_table_2 +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); +NOTICE: 9 rows copied from permissions.pathman_user2_table_0 +NOTICE: 11 rows copied from permissions.pathman_user2_table_1 +NOTICE: 10 rows copied from permissions.pathman_user2_table_2 drop_partitions ----------------- 3 (1 row) /* Switch to #1 */ -SET ROLE user1; +SET ROLE pathman_user1; CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; -GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); create_range_partitions ------------------------- @@ -178,11 +203,11 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} + attrelid | attname | attacl 
+------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} (3 rows) ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ @@ -197,12 +222,12 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} (4 rows) ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ @@ -217,22 +242,22 @@ WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list WHERE parent = 'permissions.dropped_column'::REGCLASS) AND attacl IS NOT NULL ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ - attrelid | attname | attacl -------------------------------+---------+------------------ - permissions.dropped_column_1 | val | {user2=ar/user1} - permissions.dropped_column_2 | val | {user2=ar/user1} - permissions.dropped_column_3 | val | {user2=ar/user1} - permissions.dropped_column_4 | val | {user2=ar/user1} - 
permissions.dropped_column_5 | val | {user2=ar/user1} + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_5 | val | {pathman_user2=ar/pathman_user1} (5 rows) DROP TABLE permissions.dropped_column CASCADE; NOTICE: drop cascades to 6 other objects /* Finally reset user */ RESET ROLE; -DROP OWNED BY user1; -DROP OWNED BY user2; -DROP USER user1; -DROP USER user2; -DROP SCHEMA permissions CASCADE; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; +DROP SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_permissions_1.out b/expected/pathman_permissions_1.out new file mode 100644 index 00000000..dc976aae --- /dev/null +++ b/expected/pathman_permissions_1.out @@ -0,0 +1,263 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA permissions; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user2; +/* Switch to #1 */ +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; +/* Should fail (can't SELECT) */ +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT 
SELECT ON permissions.pathman_user1_table TO pathman_user2; +/* Should fail (don't own parent) */ +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Should be ok */ +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +/* Should be able to see */ +SET ROLE pathman_user2; +SELECT * FROM pathman_config; + partrel | expr | parttype | range_interval +---------------------------------+------+----------+---------------- + permissions.pathman_user1_table | id | 2 | 10 +(1 row) + +SELECT * FROM pathman_config_params; + partrel | enable_parent | auto | init_callback | spawn_using_bgw +---------------------------------+---------------+------+---------------+----------------- + permissions.pathman_user1_table | f | t | | f +(1 row) + +/* Should fail */ +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +SELECT set_auto('permissions.pathman_user1_table', false); +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" +ERROR: new row violates row-level security policy for table "pathman_config_params" +/* Should fail */ +SET ROLE pathman_user2; +DELETE FROM pathman_config +WHERE partrel = 'permissions.pathman_user1_table'::regclass; +WARNING: only the owner or superuser can change partitioning configuration of table "pathman_user1_table" +/* No rights to insert, should fail */ +SET ROLE pathman_user2; +DO $$ +BEGIN + INSERT INTO permissions.pathman_user1_table (id, a) VALUES 
(35, 0); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* No rights to create partitions (need INSERT privilege) */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); +ERROR: permission denied for parent relation "pathman_user1_table" +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ +/* Should be able to prepend a partition */ +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); + prepend_range_partition +----------------------------------- + permissions.pathman_user1_table_4 +(1 row) + +SELECT attname, attacl FROM pg_attribute +WHERE attrelid = (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS + ORDER BY range_min::int ASC /* prepend */ + LIMIT 1) +ORDER BY attname; /* check ACL for each column */ + attname | attacl +----------+--------------------------------- + a | {pathman_user2=w/pathman_user1} + cmax | + cmin | + ctid | + id | + tableoid | + xmax | + xmin | +(8 rows) + +/* Have rights, should be ok (parent's ACL is shared by new children) */ +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; + id | a +----+--- + 35 | 0 +(1 row) + +SELECT relname, relacl FROM pg_class +WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS + ORDER BY range_max::int DESC /* append */ + LIMIT 3) +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ + relname | relacl +-----------------------+----------------------------------------------------------------------- + pathman_user1_table_2 | 
{pathman_user1=arwdDxtm/pathman_user1,pathman_user2=r/pathman_user1} + pathman_user1_table_5 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=ar/pathman_user1} + pathman_user1_table_6 | {pathman_user1=arwdDxtm/pathman_user1,pathman_user2=ar/pathman_user1} +(3 rows) + +/* Try to drop partition, should fail */ +DO $$ +BEGIN + SELECT drop_range_partition('permissions.pathman_user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; +NOTICE: Insufficient priviliges +/* Disable automatic partition creation */ +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); + set_auto +---------- + +(1 row) + +/* Partition creation, should fail */ +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; +ERROR: no suitable partition for key '55' +/* Finally drop partitions */ +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); +NOTICE: 10 rows copied from permissions.pathman_user1_table_1 +NOTICE: 10 rows copied from permissions.pathman_user1_table_2 +NOTICE: 0 rows copied from permissions.pathman_user1_table_4 +NOTICE: 0 rows copied from permissions.pathman_user1_table_5 +NOTICE: 1 rows copied from permissions.pathman_user1_table_6 + drop_partitions +----------------- + 5 +(1 row) + +/* Switch to #2 */ +SET ROLE pathman_user2; +/* Test ddl event trigger */ +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); +NOTICE: 9 rows copied from permissions.pathman_user2_table_0 +NOTICE: 11 rows copied from permissions.pathman_user2_table_1 +NOTICE: 10 rows copied from permissions.pathman_user2_table_2 + drop_partitions +----------------- + 3 
+(1 row) + +/* Switch to #1 */ +SET ROLE pathman_user1; +CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c int); +INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; +SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); + create_range_partitions +------------------------- + 3 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column */ + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} +(3 rows) + +ALTER TABLE permissions.dropped_column DROP COLUMN a; /* DROP "a" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_4 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} +(4 rows) + 
+ALTER TABLE permissions.dropped_column DROP COLUMN b; /* DROP "b" */ +SELECT append_range_partition('permissions.dropped_column'); + append_range_partition +------------------------------ + permissions.dropped_column_5 +(1 row) + +SELECT attrelid::regclass, attname, attacl FROM pg_attribute +WHERE attrelid = ANY (SELECT "partition" FROM pathman_partition_list + WHERE parent = 'permissions.dropped_column'::REGCLASS) + AND attacl IS NOT NULL +ORDER BY attrelid::regclass::text; /* check ACL for each column (+1 partition) */ + attrelid | attname | attacl +------------------------------+---------+---------------------------------- + permissions.dropped_column_1 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_2 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_3 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_4 | val | {pathman_user2=ar/pathman_user1} + permissions.dropped_column_5 | val | {pathman_user2=ar/pathman_user1} +(5 rows) + +DROP TABLE permissions.dropped_column CASCADE; +NOTICE: drop cascades to 6 other objects +/* Finally reset user */ +RESET ROLE; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; +DROP SCHEMA permissions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_deletes.out b/expected/pathman_rebuild_deletes.out new file mode 100644 index 00000000..a5edc242 --- /dev/null +++ b/expected/pathman_rebuild_deletes.out @@ -0,0 +1,106 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; +/* + * Test DELETEs on a partition with different TupleDescriptor. 
+ */ +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); + append_range_partition +------------------------ + test_deletes.test_11 +(1 row) + +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; + QUERY PLAN +--------------------------- + Delete on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 1 | test_deletes.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; + QUERY PLAN +----------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+----+---------------------- + 101 | 10 | test_deletes.test_11 +(1 row) + +CREATE TABLE test_deletes.test_dummy (val INT4); +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Delete on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, 
t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Delete on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_deletes.test >= '(100,8)'::record) AND (val = 101)) +(3 rows) + +DROP TABLE test_deletes.test_dummy; +DROP TABLE test_deletes.test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_deletes; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_deletes_1.out b/expected/pathman_rebuild_deletes_1.out new file mode 100644 index 00000000..eb2f5001 --- /dev/null +++ b/expected/pathman_rebuild_deletes_1.out @@ -0,0 +1,106 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; +/* + * Test DELETEs on a partition with different TupleDescriptor. 
+ */ +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); + append_range_partition +------------------------ + test_deletes.test_11 +(1 row) + +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; + QUERY PLAN +--------------------------- + Delete on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 1 | test_deletes.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; + QUERY PLAN +----------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+----+---------------------- + 101 | 10 | test_deletes.test_11 +(1 row) + +CREATE TABLE test_deletes.test_dummy (val INT4); +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Delete on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, 
t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Delete on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Delete on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_deletes.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +DROP TABLE test_deletes.test_dummy; +DROP TABLE test_deletes.test CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_deletes; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates.out b/expected/pathman_rebuild_updates.out index d06f7c5b..40c5b048 100644 --- a/expected/pathman_rebuild_updates.out +++ b/expected/pathman_rebuild_updates.out @@ -1,3 +1,9 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; @@ -24,6 +30,7 @@ SELECT append_range_partition('test_updates.test'); (1 row) INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; /* tuple descs are the same */ EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; QUERY PLAN @@ -43,14 +50,10 @@ UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; QUERY PLAN ----------------------------- - Update on test - Update on test - Update on test_11 - -> Seq Scan on test - Filter: (val = 101) + Update on test_11 -> Seq Scan on test_11 Filter: (val = 101) -(7 rows) +(3 rows) UPDATE 
test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; val | b | tableoid @@ -58,6 +61,140 @@ UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLA 101 | 0 | test_updates.test_11 (1 row) -DROP SCHEMA test_updates CASCADE; -NOTICE: drop cascades to 13 other objects +CREATE TABLE test_updates.test_dummy (val INT4); +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Update on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Update on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_updates.test >= '(100,8)'::record) AND (val = 101)) +(3 rows) + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + test +--------- + (101,0) +(1 row) + +DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + parent | partition | parttype | 
expr | range_min | range_max | columns +-------------------+----------------------+----------+------+-----------+-----------+--------- + test_updates.test | test_updates.test_1 | 2 | val | 1 | 11 | 9 + test_updates.test | test_updates.test_2 | 2 | val | 11 | 21 | 9 + test_updates.test | test_updates.test_3 | 2 | val | 21 | 31 | 9 + test_updates.test | test_updates.test_4 | 2 | val | 31 | 41 | 9 + test_updates.test | test_updates.test_5 | 2 | val | 41 | 51 | 9 + test_updates.test | test_updates.test_6 | 2 | val | 51 | 61 | 9 + test_updates.test | test_updates.test_7 | 2 | val | 61 | 71 | 9 + test_updates.test | test_updates.test_8 | 2 | val | 71 | 81 | 9 + test_updates.test | test_updates.test_9 | 2 | val | 81 | 91 | 9 + test_updates.test | test_updates.test_10 | 2 | val | 91 | 101 | 9 + test_updates.test | test_updates.test_11 | 2 | val | 101 | 111 | 8 +(11 rows) + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 106 | 105 | test_updates.test_11 +(1 row) + +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 115 | 105 | test_updates.test_12 +(1 row) + +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 95 | 105 | test_updates.test_10 +(1 row) + +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + -1 | 105 | test_updates.test_13 +(1 row) + +/* basic check for 'ALTER TABLE ... 
ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +DROP TABLE test_updates.test CASCADE; +NOTICE: drop cascades to 14 other objects +DROP SCHEMA test_updates; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rebuild_updates_1.out b/expected/pathman_rebuild_updates_1.out new file mode 100644 index 00000000..57b3297a --- /dev/null +++ b/expected/pathman_rebuild_updates_1.out @@ -0,0 +1,200 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_updates; +/* + * Test UPDATEs on a partition with different TupleDescriptor. 
+ */ +/* create partitioned table */ +CREATE TABLE test_updates.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_updates.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_updates.test', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* drop column 'a' */ +ALTER TABLE test_updates.test DROP COLUMN a; +/* append new partition */ +SELECT append_range_partition('test_updates.test'); + append_range_partition +------------------------ + test_updates.test_11 +(1 row) + +INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; + QUERY PLAN +--------------------------- + Update on test_1 + -> Seq Scan on test_1 + Filter: (val = 1) +(3 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+--------------------- + 1 | 0 | test_updates.test_1 +(1 row) + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; + QUERY PLAN +----------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: (val = 101) +(3 rows) + +UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+---+---------------------- + 101 | 0 | test_updates.test_11 +(1 row) + +CREATE TABLE test_updates.test_dummy (val INT4); +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +------------------------------------ + Update on test_11 + -> Nested Loop Semi Join + -> Seq Scan on test_11 + Filter: (val = 101) + -> Seq Scan on test_dummy + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = 
t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + QUERY PLAN +--------------------------------------- + Update on test_11 t1 + -> Nested Loop + -> Seq Scan on test_11 t1 + Filter: (val = 101) + -> Seq Scan on test_dummy t2 + Filter: (val = 101) +(6 rows) + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + QUERY PLAN +----------------------------------------------------------------------------------- + Update on test_11 + -> Seq Scan on test_11 + Filter: (((test_11.*)::test_updates.test >= ROW(100, 8)) AND (val = 101)) +(3 rows) + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + test +--------- + (101,0) +(1 row) + +DROP TABLE test_updates.test_dummy; +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + parent | partition | parttype | expr | range_min | range_max | columns +-------------------+----------------------+----------+------+-----------+-----------+--------- + test_updates.test | test_updates.test_1 | 2 | val | 1 | 11 | 9 + test_updates.test | test_updates.test_2 | 2 | val | 11 | 21 | 9 + test_updates.test | test_updates.test_3 | 2 | val | 21 | 31 | 9 + test_updates.test | test_updates.test_4 | 2 | val | 31 | 41 | 9 + test_updates.test | test_updates.test_5 | 2 | val | 41 | 51 | 9 + test_updates.test | test_updates.test_6 | 2 | val | 51 | 61 | 9 + test_updates.test | test_updates.test_7 | 2 | val | 61 | 71 | 9 + test_updates.test | test_updates.test_8 | 2 | val | 71 | 81 | 9 + test_updates.test | test_updates.test_9 | 2 | val | 81 | 91 | 9 + test_updates.test | test_updates.test_10 | 2 | val | 91 | 101 | 9 + test_updates.test | test_updates.test_11 | 2 | val | 101 | 111 | 8 +(11 
rows) + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 106 | 105 | test_updates.test_11 +(1 row) + +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 115 | 105 | test_updates.test_12 +(1 row) + +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + 95 | 105 | test_updates.test_10 +(1 row) + +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + val | b | tableoid +-----+-----+---------------------- + -1 | 105 | test_updates.test_13 +(1 row) + +/* basic check for 'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: drop cascades to 3 other objects +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); + create_range_partitions +------------------------- + 1 +(1 row) + +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; + val | x +-----+--- + 11 | +(1 row) + +drop table test_updates.test_5113 cascade; +NOTICE: 
drop cascades to 3 other objects +DROP TABLE test_updates.test CASCADE; +NOTICE: drop cascades to 14 other objects +DROP SCHEMA test_updates; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks.out b/expected/pathman_rowmarks.out index e66c41d9..6d4611ee 100644 --- a/expected/pathman_rowmarks.out +++ b/expected/pathman_rowmarks.out @@ -1,3 +1,32 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; CREATE TABLE rowmarks.first(id int NOT NULL); @@ -10,6 +39,7 @@ SELECT create_hash_partitions('rowmarks.first', 'id', 5); 5 (1 row) +VACUUM ANALYZE; /* Not partitioned */ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; id @@ -168,13 +198,213 @@ FOR SHARE; 6 (1 row) -DROP SCHEMA rowmarks CASCADE; -NOTICE: drop cascades to 7 other objects -DETAIL: drop cascades to table rowmarks.first -drop cascades to table rowmarks.second -drop cascades to table rowmarks.first_0 +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) + 
+EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM 
rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) +(7 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 drop cascades to table rowmarks.first_1 drop cascades to table rowmarks.first_2 drop cascades to table rowmarks.first_3 drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_1.out b/expected/pathman_rowmarks_1.out new file mode 100644 index 00000000..063fca8d --- /dev/null +++ b/expected/pathman_rowmarks_1.out @@ -0,0 +1,465 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * 
------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Append + -> Seq Scan on first + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(10 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; 
+-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? +---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +--------------------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_5.id + -> Append + -> Seq Scan on first first_5 + -> Seq Scan on first_0 first_0_1 + -> Seq Scan on first_1 first_1_1 + -> Seq Scan on first_2 first_2_1 + -> Seq Scan on first_3 first_3_1 + -> Seq Scan on first_4 first_4_1 + -> Append + -> Seq Scan on first + Filter: (id = $1) + -> Seq Scan on first_0 + Filter: (id = $1) + -> Seq Scan on first_1 + Filter: (id = $1) + -> Seq Scan on first_2 + Filter: (id = $1) + -> Seq Scan on first_3 + Filter: (id = $1) + -> Seq Scan on first_4 + Filter: (id = $1) +(26 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Append + -> Seq Scan on first + Filter: (id = $1) + -> Seq Scan on first_0 + Filter: (id = $1) + -> Seq Scan on first_1 + Filter: (id = $1) + -> Seq Scan on first_2 + Filter: (id = $1) + -> Seq Scan on first_3 + Filter: (id = $1) + -> Seq Scan on first_4 + Filter: (id = $1) +(20 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 
LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(14 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +--------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = 
first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first + Filter: (id < 1) + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(18 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +---------------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_4 + Filter: ((id = 1) OR (id = 2)) +(18 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes 
(plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Append + -> Seq Scan on first + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 1) + -> Seq Scan on first_2 + Filter: (id = 1) + -> Seq Scan on first_3 + Filter: (id = 1) + -> Seq Scan on first_4 + Filter: (id = 1) +(17 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +--------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first + Filter: (id < 1) + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(18 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +---------------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on first_4 + Filter: ((id = 1) OR (id = 
2)) +(18 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_2.out b/expected/pathman_rowmarks_2.out new file mode 100644 index 00000000..91d7804e --- /dev/null +++ b/expected/pathman_rowmarks_2.out @@ -0,0 +1,407 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. 
+ */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +--------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +----------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_0.id + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * 
FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +--------------------------------------------------- + LockRows + -> Sort + Sort Key: first_0.id + -> Hash Join + Hash Cond: (first_0.id = second.id) + -> Append + -> Seq Scan on first_0 + -> Seq Scan on first_1 + -> Seq Scan on first_2 + -> Seq Scan on first_3 + -> Seq Scan on first_4 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE 
rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id < 1) + -> Seq Scan on first_1 + Filter: (id < 1) + -> Seq Scan on 
first_2 + Filter: (id < 1) + -> Seq Scan on first_3 + Filter: (id < 1) + -> Seq Scan on first_4 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first_0.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 + Filter: (id = 1) + -> Seq Scan on first_1 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_3.out b/expected/pathman_rowmarks_3.out new file mode 100644 index 00000000..e8644292 --- /dev/null +++ b/expected/pathman_rowmarks_3.out @@ -0,0 +1,407 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. 
+ * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +----------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +------------------------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: first_1.id + -> Append + -> Seq Scan on first_0 first_2 + -> Seq Scan on first_1 first_3 + -> Seq Scan on first_2 first_4 + -> Seq Scan on first_3 first_5 + -> Seq Scan on first_4 first_6 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +---------------------------------------------- + LockRows + InitPlan 1 (returns $1) + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = $1) + -> Seq Scan on first_0 first + Filter: (id = $1) + -> Seq Scan on first_1 first + Filter: (id = $1) + -> Seq Scan on first_2 first + Filter: (id = $1) + -> Seq Scan on first_3 first + Filter: (id = $1) + -> Seq Scan on first_4 first + Filter: (id = $1) +(19 rows) + +/* Two tables (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* 
JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +----------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: 
(id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join 
Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_rowmarks_4.out b/expected/pathman_rowmarks_4.out new file mode 100644 index 00000000..5fbec84d --- /dev/null +++ b/expected/pathman_rowmarks_4.out @@ -0,0 +1,407 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. 
+ * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA rowmarks; +CREATE TABLE rowmarks.first(id int NOT NULL); +CREATE TABLE rowmarks.second(id int NOT NULL); +INSERT INTO rowmarks.first SELECT generate_series(1, 10); +INSERT INTO rowmarks.second SELECT generate_series(1, 10); +SELECT create_hash_partitions('rowmarks.first', 'id', 5); + create_hash_partitions +------------------------ + 5 +(1 row) + +VACUUM ANALYZE; +/* Not partitioned */ +SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* Simple case (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + QUERY PLAN +----------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 +(9 rows) + +/* Simple case (execution) */ +SELECT * FROM rowmarks.first ORDER BY id FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT FROM rowmarks.first ORDER BY id FOR UPDATE; +-- +(10 rows) + +SELECT tableoid > 0 FROM rowmarks.first ORDER BY id FOR UPDATE; + ?column? 
+---------- + t + t + t + t + t + t + t + t + t + t +(10 rows) + +/* A little harder (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 10 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +------------------------------------------------------------- + LockRows + InitPlan 1 + -> Limit + -> LockRows + -> Sort + Sort Key: first_1.id + -> Append + -> Seq Scan on first_0 first_2 + -> Seq Scan on first_1 first_3 + -> Seq Scan on first_2 first_4 + -> Seq Scan on first_3 first_5 + -> Seq Scan on first_4 first_6 + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = (InitPlan 1).col1) + -> Seq Scan on first_0 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_1 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_2 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_3 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_4 first + Filter: (id = (InitPlan 1).col1) +(24 rows) + +/* A little harder (execution) */ +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.first + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* Two tables (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + QUERY PLAN +-------------------------------------------------- + LockRows + InitPlan 1 + -> Limit + -> LockRows + -> Sort + Sort Key: second.id + -> Seq Scan on second + -> Custom Scan (RuntimeAppend) + Prune by: (first.id = (InitPlan 1).col1) + -> Seq Scan on first_0 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_1 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_2 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_3 first + Filter: (id = (InitPlan 1).col1) + -> Seq Scan on first_4 first + Filter: (id = (InitPlan 1).col1) +(19 rows) + +/* Two tables (execution) */ 
+SELECT * FROM rowmarks.first +WHERE id = (SELECT id FROM rowmarks.second + ORDER BY id + OFFSET 5 LIMIT 1 + FOR UPDATE) +FOR SHARE; + id +---- + 6 +(1 row) + +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + QUERY PLAN +----------------------------------------------------- + LockRows + -> Sort + Sort Key: first.id + -> Hash Join + Hash Cond: (first.id = second.id) + -> Append + -> Seq Scan on first_0 first_1 + -> Seq Scan on first_1 first_2 + -> Seq Scan on first_2 first_3 + -> Seq Scan on first_3 first_4 + -> Seq Scan on first_4 first_5 + -> Hash + -> Seq Scan on second +(13 rows) + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + id +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + QUERY PLAN +------------------------- + LockRows + -> Seq Scan on first +(2 rows) + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + id +---- +(0 rows) + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + 
Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Update on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; + QUERY PLAN +--------------------------------------- + Update on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + id | tableoid +----+----------------- + 1 | rowmarks.second + 1 | rowmarks.second +(2 rows) + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); + QUERY PLAN +--------------------------------------- + Delete on second + -> Nested Loop Semi Join + -> Seq Scan on second + Filter: (id = 1) + -> Seq Scan on first_0 first + Filter: (id = 1) +(6 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT 
id FROM rowmarks.first WHERE id < 1); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id < 1) + -> Seq Scan on first_1 first_2 + Filter: (id < 1) + -> Seq Scan on first_2 first_3 + Filter: (id < 1) + -> Seq Scan on first_3 first_4 + Filter: (id < 1) + -> Seq Scan on first_4 first_5 + Filter: (id < 1) +(16 rows) + +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); + QUERY PLAN +----------------------------------------------------- + Delete on second + -> Nested Loop Semi Join + Join Filter: (second.id = first.id) + -> Seq Scan on second + -> Materialize + -> Append + -> Seq Scan on first_0 first_1 + Filter: (id = 1) + -> Seq Scan on first_1 first_2 + Filter: (id = 2) +(10 rows) + +SET enable_hashjoin = t; +SET enable_mergejoin = t; +DROP TABLE rowmarks.first CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table rowmarks.first_0 +drop cascades to table rowmarks.first_1 +drop cascades to table rowmarks.first_2 +drop cascades to table rowmarks.first_3 +drop cascades to table rowmarks.first_4 +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_runtime_nodes.out b/expected/pathman_runtime_nodes.out index ef9aaa93..f699ddeb 100644 --- a/expected/pathman_runtime_nodes.out +++ b/expected/pathman_runtime_nodes.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; @@ -57,7 +58,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_2() returns text as $$ @@ -99,7 +99,6 @@ begin 
return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_3() returns text as $$ @@ -132,7 +131,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_4() returns text as $$ @@ -171,7 +169,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; create or replace function test.pathman_test_5() returns text as $$ @@ -232,7 +229,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_hashjoin = off set enable_mergejoin = off; create table test.run_values as select generate_series(1, 10000) val; @@ -403,6 +399,37 @@ where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < t (1 row) +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! 
*/ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Function Scan on generate_series f + -> Aggregate + -> Custom Scan (RuntimeAppend) + Prune by: (dropped_cols.val = f.id) + -> Seq Scan on dropped_cols_0 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_1 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_2 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_3 dropped_cols + Filter: (val = f.id) +(13 rows) + +drop table test.dropped_cols cascade; +NOTICE: drop cascades to 4 other objects set enable_hashjoin = off; set enable_mergejoin = off; select from test.runtime_test_4 @@ -412,7 +439,67 @@ where id = any (select generate_series(-10, -1)); /* should be empty */ set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test CASCADE; -NOTICE: drop cascades to 37 other objects +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_2 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_3 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test.runtime_test_4 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); +DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. 
+-- +CREATE TABLE part_test (val int NOT NULL); +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); + create_hash_partitions +------------------------ + 2 +(1 row) + +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+------------- + 2 | part_test_1 +(1 row) + +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; +NOTICE: AFTER DELETE ROW (part_test_1) +WARNING: "pg_pathman.enable" must be called before any query, ignored + val | tableoid +-----+------------ + 3 | pg_pathman +(1 row) + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION part_test_trigger(); DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_runtime_nodes_1.out b/expected/pathman_runtime_nodes_1.out new file mode 100644 index 00000000..e975c761 --- /dev/null +++ b/expected/pathman_runtime_nodes_1.out @@ -0,0 +1,505 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +/* + * Test RuntimeAppend + */ +create or replace function test.pathman_assert(smt bool, error_msg text) returns text as $$ +begin + if not smt then + raise exception '%', error_msg; + end if; + + return 'ok'; +end; +$$ language plpgsql; +create or replace function test.pathman_equal(a text, b text, error_msg text) returns text as $$ 
+begin + if a != b then + raise exception '''%'' is not equal to ''%'', %', a, b, error_msg; + end if; + + return 'equal'; +end; +$$ language plpgsql; +create or replace function test.pathman_test(query text) returns jsonb as $$ +declare + plan jsonb; +begin + execute 'explain (analyze, format json)' || query into plan; + + return plan; +end; +$$ language plpgsql; +create or replace function test.pathman_test_1() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = (select * from test.run_values limit 1)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Relation Name')::text, + format('"runtime_test_1_%s"', pathman.get_hash_part_idx(hashint4(1), 6)), + 'wrong partition'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans') into num; + perform test.pathman_equal(num::text, '2', 'expected 2 child plans for custom scan'); + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_2() returns text as $$ +declare + plan jsonb; + num int; + c text; +begin + plan = test.pathman_test('select * from test.runtime_test_1 where id = any (select * from test.run_values limit 4)'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + 
perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + execute 'select string_agg(y.z, '','') from + (select (x->''Relation Name'')::text as z from + jsonb_array_elements($1->0->''Plan''->''Plans''->1->''Plans'') x + order by x->''Relation Name'') y' + into c using plan; + perform test.pathman_equal(c, '"runtime_test_1_2","runtime_test_1_3","runtime_test_1_4","runtime_test_1_5"', + 'wrong partitions'); + + for i in 0..3 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_equal(num::text, '1', 'expected 1 loop'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_3() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.runtime_test_1 a join test.run_values b on a.id = b.val'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Custom Plan Provider')::text, + '"RuntimeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans') into num; + perform test.pathman_equal(num::text, '6', 'expected 6 child plans for custom scan'); + + for i in 0..5 loop + num = plan->0->'Plan'->'Plans'->1->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num > 0 and num <= 1718, 'expected no more than 1718 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_4() returns text as $$ +declare + plan jsonb; + num int; +begin + plan = test.pathman_test('select * from test.category c, lateral' || + '(select * from test.runtime_test_2 g where 
g.category_id = c.id order by rating limit 4) as tg'); + + perform test.pathman_equal((plan->0->'Plan'->'Node Type')::text, + '"Nested Loop"', + 'wrong plan type'); + + /* Limit -> Custom Scan */ + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Node Type')::text, + '"Custom Scan"', + 'wrong plan type'); + + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->0->'Custom Plan Provider')::text, + '"RuntimeMergeAppend"', + 'wrong plan provider'); + + select count(*) from jsonb_array_elements_text(plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans') into num; + perform test.pathman_equal(num::text, '4', 'expected 4 child plans for custom scan'); + + for i in 0..3 loop + perform test.pathman_equal((plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Relation Name')::text, + format('"runtime_test_2_%s"', pathman.get_hash_part_idx(hashint4(i + 1), 6)), + 'wrong partition'); + + num = plan->0->'Plan'->'Plans'->1->'Plans'->0->'Plans'->i->'Actual Loops'; + perform test.pathman_assert(num = 1, 'expected no more than 1 loops'); + end loop; + + return 'ok'; +end; +$$ language plpgsql +set enable_mergejoin = off +set enable_hashjoin = off; +create or replace function test.pathman_test_5() returns text as $$ +declare + res record; +begin + select + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test empty tlist */ + + + select id * 2, id, 17 + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + limit 1 + into res; /* test computations */ + + + select test.vals.* from test.vals, lateral (select from test.runtime_test_3 + where id = test.vals.val) as q + into res; /* test lateral */ + + + select id, generate_series(1, 2) gen, val + from test.runtime_test_3 + where id = (select * from test.vals order by val limit 1) + order by id, gen, val + offset 1 limit 1 + into res; /* without IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '1', 'id is incorrect 
(t2)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t2)'); + perform test.pathman_equal(res.val::text, 'k = 1', 'val is incorrect (t2)'); + + + select id + from test.runtime_test_3 + where id = any (select * from test.vals order by val limit 5) + order by id + offset 3 limit 1 + into res; /* with IndexOnlyScan */ + + perform test.pathman_equal(res.id::text, '4', 'id is incorrect (t3)'); + + + select v.val v1, generate_series(2, 2) gen, t.val v2 + from test.runtime_test_3 t join test.vals v on id = v.val + order by v1, gen, v2 + limit 1 + into res; + + perform test.pathman_equal(res.v1::text, '1', 'v1 is incorrect (t4)'); + perform test.pathman_equal(res.gen::text, '2', 'gen is incorrect (t4)'); + perform test.pathman_equal(res.v2::text, 'k = 1', 'v2 is incorrect (t4)'); + + return 'ok'; +end; +$$ language plpgsql +set enable_hashjoin = off +set enable_mergejoin = off; +create table test.run_values as select generate_series(1, 10000) val; +create table test.runtime_test_1(id serial primary key, val real); +insert into test.runtime_test_1 select generate_series(1, 10000), random(); +select pathman.create_hash_partitions('test.runtime_test_1', 'id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.category as (select id, 'cat' || id::text as name from generate_series(1, 4) id); +create table test.runtime_test_2 (id serial, category_id int not null, name text, rating real); +insert into test.runtime_test_2 (select id, (id % 6) + 1 as category_id, 'good' || id::text as name, random() as rating from generate_series(1, 100000) id); +create index on test.runtime_test_2 (category_id, rating); +select pathman.create_hash_partitions('test.runtime_test_2', 'category_id', 6); + create_hash_partitions +------------------------ + 6 +(1 row) + +create table test.vals as (select generate_series(1, 10000) as val); +create table test.runtime_test_3(val text, id serial not null); +insert into test.runtime_test_3(id, val) 
select * from generate_series(1, 10000) k, format('k = %s', k); +select pathman.create_hash_partitions('test.runtime_test_3', 'id', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +create index on test.runtime_test_3 (id); +create index on test.runtime_test_3_0 (id); +create table test.runtime_test_4(val text, id int not null); +insert into test.runtime_test_4(id, val) select * from generate_series(1, 10000) k, md5(k::text); +select pathman.create_range_partitions('test.runtime_test_4', 'id', 1, 2000); + create_range_partitions +------------------------- + 5 +(1 row) + +VACUUM ANALYZE; +set pg_pathman.enable_runtimeappend = on; +set pg_pathman.enable_runtimemergeappend = on; +select test.pathman_test_1(); /* RuntimeAppend (select ... where id = (subquery)) */ + pathman_test_1 +---------------- + ok +(1 row) + +select test.pathman_test_2(); /* RuntimeAppend (select ... where id = any(subquery)) */ + pathman_test_2 +---------------- + ok +(1 row) + +select test.pathman_test_3(); /* RuntimeAppend (a join b on a.id = b.val) */ + pathman_test_3 +---------------- + ok +(1 row) + +select test.pathman_test_4(); /* RuntimeMergeAppend (lateral) */ + pathman_test_4 +---------------- + ok +(1 row) + +select test.pathman_test_5(); /* projection tests for RuntimeXXX nodes */ + pathman_test_5 +---------------- + ok +(1 row) + +/* RuntimeAppend (join, enabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', true); + set_enable_parent +------------------- + +(1 row) + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (t1.id = run_values.val) + -> Seq Scan on runtime_test_1 t1 + Filter: (id = run_values.val) + -> Index Only Scan using runtime_test_1_0_pkey on 
runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(19 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, disabled parent) */ +select pathman.set_enable_parent('test.runtime_test_1', false); + set_enable_parent +------------------- + +(1 row) + +explain (costs off) +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + QUERY PLAN +-------------------------------------------------------------------------------- + Nested Loop + -> Limit + -> Seq Scan on run_values + -> Custom Scan (RuntimeAppend) + Prune by: (t1.id = run_values.val) + -> Index Only Scan using runtime_test_1_0_pkey on runtime_test_1_0 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_1_pkey on runtime_test_1_1 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_2_pkey on runtime_test_1_2 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_3_pkey on runtime_test_1_3 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_4_pkey on runtime_test_1_4 t1 + Index Cond: (id = run_values.val) + -> Index Only Scan using runtime_test_1_5_pkey on runtime_test_1_5 t1 + Index Cond: (id = run_values.val) +(17 rows) + +select from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 
on t1.id = t2.val; +-- +(4 rows) + +/* RuntimeAppend (join, additional projections) */ +select generate_series(1, 2) from test.runtime_test_1 as t1 +join (select * from test.run_values limit 4) as t2 on t1.id = t2.val; + generate_series +----------------- + 1 + 2 + 1 + 2 + 1 + 2 + 1 + 2 +(8 rows) + +/* RuntimeAppend (select ... where id = ANY (subquery), missing partitions) */ +select count(*) = 0 from pathman.pathman_partition_list +where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; + ?column? +---------- + t +(1 row) + +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); + create_hash_partitions +------------------------ + 4 +(1 row) + +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! 
*/ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Function Scan on generate_series f + -> Aggregate + -> Custom Scan (RuntimeAppend) + Prune by: (dropped_cols.val = f.id) + -> Seq Scan on dropped_cols_0 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_1 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_2 dropped_cols + Filter: (val = f.id) + -> Seq Scan on dropped_cols_3 dropped_cols + Filter: (val = f.id) +(13 rows) + +drop table test.dropped_cols cascade; +NOTICE: drop cascades to 4 other objects +set enable_hashjoin = off; +set enable_mergejoin = off; +select from test.runtime_test_4 +where id = any (select generate_series(-10, -1)); /* should be empty */ +-- +(0 rows) + +set enable_hashjoin = on; +set enable_mergejoin = on; +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_2 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP TABLE test.runtime_test_3 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE test.runtime_test_4 CASCADE; +NOTICE: drop cascades to 6 other objects +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); +DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. 
+-- +CREATE TABLE part_test (val int NOT NULL); +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); + create_hash_partitions +------------------------ + 2 +(1 row) + +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + val | tableoid +-----+------------- + 2 | part_test_1 +(1 row) + +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; +NOTICE: AFTER DELETE ROW (part_test_1) +WARNING: "pg_pathman.enable" must be called before any query, ignored + val | tableoid +-----+------------ + 3 | pg_pathman +(1 row) + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION part_test_trigger(); +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_subpartitions.out b/expected/pathman_subpartitions.out new file mode 100644 index 00000000..3a6a19eb --- /dev/null +++ b/expected/pathman_subpartitions.out @@ -0,0 +1,467 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. 
+ */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 
row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + -> Seq Scan on abc_1_1 + -> Seq Scan on abc_1_2 + -> Append + -> Seq Scan on abc_2_0 + Filter: (a < 150) + -> Seq Scan on abc_2_1 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + Filter: (b = 215) + -> Seq Scan on abc_1_1 + Filter: (b = 215) + -> Seq Scan on abc_1_2 + Filter: (b = 215) + -> Append + -> Seq Scan on abc_2_1 + Filter: (b = 215) + -> Append + -> Seq Scan on abc_3_2 + Filter: (b = 215) +(14 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN 
+------------------------------------------------- + Append + -> Append + -> Seq Scan on abc_3_2 + Filter: ((a = 215) AND (b = 215)) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +---------------------------------- + Append + -> Append + -> Seq Scan on abc_3_2 + Filter: (a >= 210) +(4 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + 
subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 
WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + append_range_partition +------------------------ + subpartitions.abc_3 
+(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | 
subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 
+(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_subpartitions_1.out b/expected/pathman_subpartitions_1.out new file mode 100644 index 00000000..d620cde9 --- /dev/null +++ b/expected/pathman_subpartitions_1.out @@ -0,0 +1,461 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. + */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + 
subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + 
subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + -> Seq Scan on abc_1_1 + -> Seq Scan on abc_1_2 + -> Append + -> Seq Scan on abc_2_0 + Filter: (a < 150) + -> Seq Scan on abc_2_1 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 + Filter: (b = 215) + -> Seq Scan on abc_1_1 + Filter: (b = 215) + -> Seq Scan on abc_1_2 + Filter: (b = 215) + -> Seq Scan on abc_2_1 + Filter: (b = 215) + -> Seq Scan on abc_3_2 + Filter: (b = 215) +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN +------------------------------------- + Seq Scan on abc_3_2 + Filter: ((a = 215) AND (b = 215)) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +---------------------- + Seq Scan on abc_3_2 + Filter: (a >= 210) +(2 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent 
= rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 
+ subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot 
split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + append_range_partition +------------------------ + subpartitions.abc_3 +(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE 
subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + 
subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 +(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_subpartitions_2.out b/expected/pathman_subpartitions_2.out new file mode 100644 index 00000000..26eae913 --- /dev/null +++ b/expected/pathman_subpartitions_2.out @@ -0,0 +1,461 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. 
+ */ +\set VERBOSITY terse +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT * FROM pathman_partition_list; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | subpartitions.abc_1 | 2 | a | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | a | 100 | 200 + subpartitions.abc_1 | subpartitions.abc_1_0 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_1 | 1 | a | | + subpartitions.abc_1 | subpartitions.abc_1_2 | 1 | a | | + subpartitions.abc_2 | subpartitions.abc_2_0 | 1 | b | | + subpartitions.abc_2 | subpartitions.abc_2_1 | 1 | b | | +(7 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_1_2 | 1 | 1 + subpartitions.abc_1_0 | 21 | 21 + subpartitions.abc_1_1 | 41 | 41 + subpartitions.abc_1_0 | 61 | 61 + subpartitions.abc_1_2 | 81 | 81 + subpartitions.abc_2_0 | 101 | 101 + subpartitions.abc_2_1 | 121 | 121 + subpartitions.abc_2_0 | 141 | 141 + subpartitions.abc_2_1 | 161 | 161 + subpartitions.abc_2_1 | 181 | 181 +(10 rows) + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); + append_range_partition +------------------------ + subpartitions.abc_3 +(1 
row) + +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | b | 200 | 210 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | b | 210 | 220 +(2 rows) + +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_3_2 | 215 | 215 +(1 row) + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; + QUERY PLAN +--------------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 abc_2 + -> Seq Scan on abc_1_1 abc_3 + -> Seq Scan on abc_1_2 abc_4 + -> Append + -> Seq Scan on abc_2_0 abc_6 + Filter: (a < 150) + -> Seq Scan on abc_2_1 abc_7 + Filter: (a < 150) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; + QUERY PLAN +--------------------------------------- + Append + -> Append + -> Seq Scan on abc_1_0 abc_2 + Filter: (b = 215) + -> Seq Scan on abc_1_1 abc_3 + Filter: (b = 215) + -> Seq Scan on abc_1_2 abc_4 + Filter: (b = 215) + -> Seq Scan on abc_2_1 abc_5 + Filter: (b = 215) + -> Seq Scan on abc_3_2 abc_6 + Filter: (b = 215) +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM 
subpartitions.abc WHERE a = 215 AND b = 215; + QUERY PLAN +------------------------------------- + Seq Scan on abc_3_2 abc + Filter: ((a = 215) AND (b = 215)) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + QUERY PLAN +------------------------- + Seq Scan on abc_3_2 abc + Filter: (a >= 210) +(2 rows) + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); + check_multilevel_queries +-------------------------- + +(1 row) + +DROP FUNCTION check_multilevel_queries(); +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' +) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); + append_range_partition +------------------------ + subpartitions.abc_4 +(1 row) + +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_0 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_0 + subpartitions.abc_2_1 + 
subpartitions.abc_3 + subpartitions.abc_3_1 + subpartitions.abc_3_2 + subpartitions.abc_4 + subpartitions.abc_4_0 + subpartitions.abc_4_1 +(14 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 15 other objects +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + create_range_partitions +------------------------- + 2 +(1 row) + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + tableoid | a | b +-----------------------+----+---- + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 + subpartitions.abc_1_1 | 25 | 25 +(10 rows) + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 + subpartitions.abc_2_1 | 125 | 25 +(10 rows) + +UPDATE subpartitions.abc SET b = 75 
WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 + subpartitions.abc_2_2 | 125 | 75 +(10 rows) + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + tableoid | a | b +-----------------------+-----+----- + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 + subpartitions.abc_2_3 | 125 | 125 +(10 rows) + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +ERROR: cannot split partition that has children +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ + split_range_partition +----------------------- + subpartitions.abc_2_4 +(1 row) + +SELECT subpartitions.partitions_tree('subpartitions.abc'); + partitions_tree +-------------------------- + subpartitions.abc + subpartitions.abc_1 + subpartitions.abc_1_1 + subpartitions.abc_1_2 + subpartitions.abc_2 + subpartitions.abc_2_1 + subpartitions.abc_2_2 + subpartitions.abc_2_4 + subpartitions.abc_2_3 +(9 rows) + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO subpartitions.abc VALUES (150, 0); +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ + append_range_partition +------------------------ + subpartitions.abc_3 
+(1 row) + +INSERT INTO subpartitions.abc VALUES (250, 50); +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_2 | 250 | 50 +(2 rows) + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ + merge_range_partitions +------------------------ + subpartitions.abc_2_1 +(1 row) + +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + tableoid | a | b +-----------------------+-----+---- + subpartitions.abc_2_1 | 150 | 0 + subpartitions.abc_2_1 | 250 | 50 +(2 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 10 other objects +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); + prepend_range_partition +------------------------- + subpartitions.abc_3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); + create_range_partitions +------------------------- + 3 +(1 row) + +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + prepend_range_partition +------------------------- + subpartitions.abc_3_4 +(1 row) + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + parent | partition | parttype | expr | range_min | range_max +---------------------+-----------------------+----------+------+-----------+----------- + subpartitions.abc | 
subpartitions.abc_1 | 2 | id1 | 0 | 100 + subpartitions.abc | subpartitions.abc_2 | 2 | id1 | 100 | 200 + subpartitions.abc | subpartitions.abc_3 | 2 | id1 | -100 | 0 + subpartitions.abc_3 | subpartitions.abc_3_1 | 2 | id2 | 0 | 10 + subpartitions.abc_3 | subpartitions.abc_3_2 | 2 | id2 | 10 | 20 + subpartitions.abc_3 | subpartitions.abc_3_3 | 2 | id2 | 20 | 30 + subpartitions.abc_3 | subpartitions.abc_3_4 | 2 | id2 | -10 | 0 +(7 rows) + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 4 + subpartitions.abc_3_1 | -1 | 0 | 3 + subpartitions.abc_1 | 10 | 0 | 1 + subpartitions.abc_2 | 110 | 0 | 2 +(4 rows) + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + tableoid | id1 | id2 | val +-----------------------+-----+-----+----- + subpartitions.abc_3_4 | -1 | -1 | 1 + subpartitions.abc_3_4 | -1 | -1 | 2 + subpartitions.abc_3_4 | -1 | -1 | 3 + subpartitions.abc_3_4 | -1 | -1 | 4 +(4 rows) + +DROP TABLE subpartitions.abc CASCADE; +NOTICE: drop cascades to 9 other objects +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + create_range_partitions +------------------------- + 0 +(1 row) + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); + add_range_partition +----------------------- + subpartitions.a2_1020 +(1 row) + +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], array['subpartitions.a2_1020_3040']); + create_range_partitions +------------------------- + 1 
+(1 row) + +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + n1 | n2 +----+---- + 12 | 32 + 19 | 39 +(2 rows) + +DROP TABLE subpartitions.a2 CASCADE; +NOTICE: drop cascades to 4 other objects +DROP TABLE subpartitions.a1; +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_upd_del.out b/expected/pathman_upd_del.out new file mode 100644 index 00000000..752cff27 --- /dev/null +++ b/expected/pathman_upd_del.out @@ -0,0 +1,473 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on 
range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS 
OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(7 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(7 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not 
supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on 
tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> 
Append + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Append + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(12 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = 
n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + CTE n + -> Append + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + Join Filter: (t.id = n.id) + -> Seq Scan on tmp t + -> CTE Scan on n + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(14 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +------------------------------------ + Delete on tmp t + CTE q + -> Append + -> Seq Scan on tmp2_1 + -> Seq Scan on tmp2_2 + -> Nested Loop Semi Join + Join Filter: (t.id = q.id) + -> Seq Scan on tmp t + -> CTE Scan on q +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_1.out b/expected/pathman_upd_del_1.out new file mode 100644 index 00000000..6e0f312d --- /dev/null +++ b/expected/pathman_upd_del_1.out @@ -0,0 +1,473 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. 
+ * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + 
QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET 
value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Update on tmp t + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t +(12 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp t + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using 
range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t +(12 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +--------------------------------------------------------- + Delete on tmp r + -> Nested Loop + Join Filter: (a1.id = a2.id) + -> Nested Loop + Join Filter: (r.id = a1.id) + -> Seq Scan on tmp r + -> Materialize + -> Append + -> Seq Scan on tmp2 a1 + -> Seq Scan on tmp2_1 a1_1 + -> Seq Scan on tmp2_2 a1_2 + -> Seq Scan on tmp2_3 a1_3 + -> Seq Scan on tmp2_4 a1_4 + -> Seq Scan on tmp2_5 a1_5 + -> Seq Scan on tmp2_6 a1_6 + -> Seq Scan on tmp2_7 a1_7 + -> Seq Scan on tmp2_8 a1_8 + -> Seq Scan on tmp2_9 a1_9 + -> Seq Scan on tmp2_10 a1_10 + -> Materialize + -> 
Append + -> Seq Scan on tmp2 a2 + -> Seq Scan on tmp2_1 a2_1 + -> Seq Scan on tmp2_2 a2_2 + -> Seq Scan on tmp2_3 a2_3 + -> Seq Scan on tmp2_4 a2_4 + -> Seq Scan on tmp2_5 a2_5 + -> Seq Scan on tmp2_6 a2_6 + -> Seq Scan on tmp2_7 a2_7 + -> Seq Scan on tmp2_8 a2_8 + -> Seq Scan on tmp2_9 a2_9 + -> Seq Scan on tmp2_10 a2_10 +(32 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +----------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Append + -> Seq Scan on tmp2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_1 t2_1 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2_2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2_3 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2_4 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2_5 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2_6 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2_7 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2_8 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2_9 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2_10 + Filter: (id = t.id) +(27 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Append + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH 
q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Merge Join + Merge Cond: (r.id = t.id) + -> Merge Append + Sort Key: r.id + -> Index Scan using range_rel_pkey on range_rel r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Index Scan using range_rel_1_pkey on range_rel_1 r_1 + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Sort + Sort Key: t.id + -> Seq Scan on tmp t + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(17 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + CTE n + -> Append + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + Join Filter: (t.id = n.id) + -> Seq Scan on tmp t + -> CTE Scan on n + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(14 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +------------------------------------ + Delete on tmp t + CTE q + -> Append + 
-> Seq Scan on tmp2_1 + -> Seq Scan on tmp2_2 + -> Nested Loop Semi Join + Join Filter: (t.id = q.id) + -> Seq Scan on tmp t + -> CTE Scan on q +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_2.out b/expected/pathman_upd_del_2.out new file mode 100644 index 00000000..0826594c --- /dev/null +++ b/expected/pathman_upd_del_2.out @@ -0,0 +1,465 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on 
range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS 
OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM 
test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + 
-> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> 
Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN 
+---------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_3.out b/expected/pathman_upd_del_3.out new file mode 100644 index 00000000..d11eb6f8 --- /dev/null +++ b/expected/pathman_upd_del_3.out @@ -0,0 +1,465 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. 
+ * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + 
QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET 
value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t 
+WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 
a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +-------------------------------------------- + Update on tmp t + -> Seq Scan on tmp t + Filter: (SubPlan 1) + SubPlan 1 + -> Custom Scan (RuntimeAppend) + Prune by: (t2.id = t.id) + -> Seq Scan on tmp2_1 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_2 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_3 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_4 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_5 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_6 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_7 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_8 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_9 t2 + Filter: (id = t.id) + -> Seq Scan on tmp2_10 t2 + Filter: (id = t.id) +(26 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN 
+-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT 
id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +--------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 tmp2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_upd_del_4.out b/expected/pathman_upd_del_4.out new file mode 100644 index 00000000..54330190 --- /dev/null +++ b/expected/pathman_upd_del_4.out @@ -0,0 +1,464 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. 
There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + create_range_partitions +------------------------- + 12 +(1 row) + +VACUUM ANALYZE; +/* + * Test UPDATE and DELETE + */ +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +-----+--------------------------+------- + 166 | 
Tue Jun 15 00:00:00 2010 | 111 +(1 row) + +ROLLBACK; +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel_6 + -> Seq Scan on range_rel_6 + Filter: (dt = 'Tue Jun 15 00:00:00 2010'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Update on range_rel + -> Seq Scan on range_rel + Filter: (dt = 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + QUERY PLAN +-------------------------------------------------------------------------------- + Delete on range_rel + -> Seq Scan on range_rel + Filter: (dt < 'Mon Jan 01 00:00:00 1990'::timestamp without time zone) +(3 rows) + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt < '1990-01-01'; + id | dt | value +----+----+------- +(0 rows) + +ROLLBACK; +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey 
on range_rel_1 r + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Fri Jan 01 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on range_rel_1 r + -> Nested Loop + Join Filter: (r.id = t.id) + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Seq Scan on tmp t +(6 rows) + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + QUERY PLAN +-------------------------------------------------------------------------------------- + Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE 
r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + QUERY PLAN +------------------------------------------------ + Delete on tmp r + -> Nested Loop + -> Nested Loop + -> Seq Scan on tmp r + -> Custom Scan (RuntimeAppend) + Prune by: (r.id = a1.id) + -> Seq Scan on tmp2_1 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_2 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_3 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_4 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_5 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_6 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_7 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_8 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_9 a1 + Filter: (r.id = id) + -> Seq Scan on tmp2_10 a1 + Filter: (r.id = id) + -> Custom Scan (RuntimeAppend) + Prune by: (a1.id = a2.id) + -> Seq Scan on tmp2_1 a2 + Filter: (a1.id = id) + -> Seq 
Scan on tmp2_2 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_3 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_4 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_5 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_6 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_7 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_8 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_9 a2 + Filter: (a1.id = id) + -> Seq Scan on tmp2_10 a2 + Filter: (a1.id = id) +(48 rows) + +BEGIN; +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ERROR: DELETE and UPDATE queries with a join of partitioned tables are not supported +ROLLBACK; +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. 
+ */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + QUERY PLAN +------------------------------------------ + Update on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: (t.id = t2.id) + -> Seq Scan on tmp2_1 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_3 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_4 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_5 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_6 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_7 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_8 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_9 t2 + Filter: (t.id = id) + -> Seq Scan on tmp2_10 t2 + Filter: (t.id = id) +(25 rows) + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Delete on tmp + -> Nested Loop + -> Seq Scan on tmp + -> Materialize + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) +(6 rows) + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on range_rel_1 r + -> Seq Scan on range_rel_1 r + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(8 rows) + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = 
'2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + -> Index Scan using range_rel_1_pkey on range_rel_1 r + Index Cond: (id = t.id) + Filter: (dt = 'Sat Jan 02 00:00:00 2010'::timestamp without time zone) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + QUERY PLAN +--------------------------------------------- + Delete on tmp + CTE q + -> Delete on tmp t + -> Nested Loop + -> Seq Scan on tmp t + Filter: (id = 2) + -> Seq Scan on tmp2_2 tmp2 + Filter: (id = 2) + -> Nested Loop + -> Seq Scan on tmp + -> CTE Scan on q +(11 rows) + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + QUERY PLAN +-------------------------------------------------------------- + Delete on tmp t + -> Nested Loop Semi Join + -> Seq Scan on tmp t + -> Custom Scan (RuntimeAppend) + Prune by: ((tmp2.id < 3) AND (t.id = tmp2.id)) + -> Seq Scan on tmp2_1 tmp2 + Filter: (t.id = id) + -> Seq Scan on tmp2_2 tmp2 + Filter: (t.id = id) +(9 rows) + +BEGIN; +WITH q AS (SELECT id FROM 
test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +NOTICE: drop cascades to 11 other objects +DROP TABLE test.range_rel CASCADE; +NOTICE: drop cascades to 13 other objects +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/expected/pathman_update_node.out b/expected/pathman_update_node.out new file mode 100644 index 00000000..9fc1d07f --- /dev/null +++ b/expected/pathman_update_node.out @@ -0,0 +1,454 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_node; +SET pg_pathman.enable_partitionrouter = ON; +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +CREATE INDEX val_idx ON test_update_node.test_range (val); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + create_range_partitions +------------------------- + 10 +(1 row) + +/* Moving from 2st to 1st partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; + QUERY PLAN +------------------------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on test_range_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) + -> Bitmap Heap Scan on test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(8 rows) + +/* Keep same partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; + QUERY PLAN +------------------------------------------------------------------------- + Custom Scan (PartitionOverseer) + -> Update on test_range_2 + -> Custom Scan (PartitionFilter) + -> Custom Scan (PartitionRouter) + -> Bitmap Heap Scan on 
test_range_2 + Recheck Cond: (val = '15'::numeric) + -> Bitmap Index Scan on test_range_2_val_idx + Index Cond: (val = '15'::numeric) +(8 rows) + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val < 10 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_1 | 5 | 1 + test_update_node.test_range_1 | 5 | 10 + test_update_node.test_range_1 | 5 | 2 + test_update_node.test_range_1 | 5 | 3 + test_update_node.test_range_1 | 5 | 4 + test_update_node.test_range_1 | 5 | 5 + test_update_node.test_range_1 | 5 | 6 + test_update_node.test_range_1 | 5 | 7 + test_update_node.test_range_1 | 5 | 8 + test_update_node.test_range_1 | 5 | 9 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_node.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_3 | 21 | 11 + test_update_node.test_range_3 | 22 | 12 + test_update_node.test_range_3 | 23 | 13 + test_update_node.test_range_3 | 24 | 14 + test_update_node.test_range_3 | 25 | 15 + test_update_node.test_range_3 | 26 | 16 + test_update_node.test_range_3 | 27 | 17 + test_update_node.test_range_3 | 28 | 18 + test_update_node.test_range_3 | 29 | 19 + test_update_node.test_range_3 | 30 | 20 + test_update_node.test_range_3 | 21 | 21 + test_update_node.test_range_3 | 22 | 22 + test_update_node.test_range_3 | 23 | 23 + test_update_node.test_range_3 | 24 | 24 + test_update_node.test_range_3 | 25 | 25 + test_update_node.test_range_3 | 26 | 
26 + test_update_node.test_range_3 | 27 | 27 + test_update_node.test_range_3 | 28 | 28 + test_update_node.test_range_3 | 29 | 29 + test_update_node.test_range_3 | 30 | 30 +(20 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Move single row */ +UPDATE test_update_node.test_range SET val = 90 WHERE val = 80; +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 90 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_9 | 90 | 80 + test_update_node.test_range_9 | 90 | 90 +(2 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Move single row (create new partition) */ +UPDATE test_update_node.test_range SET val = -1 WHERE val = 50; +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = -1 +ORDER BY comment; + tableoid | val | comment +--------------------------------+-----+--------- + test_update_node.test_range_11 | -1 | 50 +(1 row) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Update non-key column */ +UPDATE test_update_node.test_range SET comment = 'test!' WHERE val = 100; +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 100 +ORDER BY comment; + tableoid | val | comment +--------------------------------+-----+--------- + test_update_node.test_range_10 | 100 | test! 
+(1 row) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_node.test_range_4; +UPDATE test_update_node.test_range SET val = 35 WHERE val = 70; +ERROR: cannot spawn a partition +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 70 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_7 | 70 | 70 +(1 row) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test trivial move (same key) */ +UPDATE test_update_node.test_range SET val = 65 WHERE val = 65; +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 65 +ORDER BY comment; + tableoid | val | comment +-------------------------------+-----+--------- + test_update_node.test_range_7 | 65 | 65 +(1 row) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_node.test_range', + 'test_update_node.test_range_inv', + 101::NUMERIC, 111::NUMERIC); + attach_range_partition +--------------------------------- + test_update_node.test_range_inv +(1 row) + +UPDATE test_update_node.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 105; +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 105 +ORDER BY comment; + tableoid | val | comment +---------------------------------+-----+--------- + test_update_node.test_range_inv | 105 | 60 +(1 row) + +UPDATE test_update_node.test_range SET val = 60 WHERE val = 105; +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test RETURNING */ +UPDATE 
test_update_node.test_range SET val = 71 WHERE val = 41 RETURNING val, comment; + val | comment +-----+--------- + 71 | 41 +(1 row) + +UPDATE test_update_node.test_range SET val = 71 WHERE val = 71 RETURNING val, comment; + val | comment +-----+--------- + 71 | 71 + 71 | 41 +(2 rows) + +UPDATE test_update_node.test_range SET val = 106 WHERE val = 61 RETURNING val, comment; + val | comment +-----+--------- + 106 | 61 +(1 row) + +UPDATE test_update_node.test_range SET val = 106 WHERE val = 106 RETURNING val, comment; + val | comment +-----+--------- + 106 | 61 +(1 row) + +UPDATE test_update_node.test_range SET val = 61 WHERE val = 106 RETURNING val, comment; + val | comment +-----+--------- + 61 | 61 +(1 row) + +/* Just in case, check we don't duplicate anything */ +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_node.test_range'); + append_range_partition +-------------------------------- + test_update_node.test_range_12 +(1 row) + +UPDATE test_update_node.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 115; + tableoid | val +--------------------------------+----- + test_update_node.test_range_12 | 115 +(1 row) + +UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; +SELECT count(*) FROM test_update_node.test_range; + count +------- + 90 +(1 row) + +DROP TABLE test_update_node.test_range CASCADE; +NOTICE: drop cascades to 13 other objects +/* recreate table and mass move */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 
10); + create_range_partitions +------------------------- + 10 +(1 row) + +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_1 | 1 + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* move everything to next partition */ +UPDATE test_update_node.test_range SET val = val + 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 + test_update_node.test_range_11 | 101 +(10 rows) + +/* move everything to previous partition */ +UPDATE test_update_node.test_range SET val = val - 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + tableoid | min +--------------------------------+----- + test_update_node.test_range_1 | 1 + test_update_node.test_range_2 | 11 + test_update_node.test_range_3 | 21 + test_update_node.test_range_4 | 31 + test_update_node.test_range_5 | 41 + test_update_node.test_range_6 | 51 + test_update_node.test_range_7 | 61 + test_update_node.test_range_8 | 71 + 
test_update_node.test_range_9 | 81 + test_update_node.test_range_10 | 91 +(10 rows) + +SELECT count(*) FROM test_update_node.test_range; + count +------- + 100 +(1 row) + +/* Partition table by HASH (INT4) */ +CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); + create_hash_partitions +------------------------ + 3 +(1 row) + +/* Shuffle rows a few times */ +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +/* Check values #0 */ +SELECT tableoid::regclass, * FROM test_update_node.test_hash ORDER BY val; + tableoid | val | comment +------------------------------+-----+--------- + test_update_node.test_hash_2 | 10 | 1 + test_update_node.test_hash_1 | 11 | 2 + test_update_node.test_hash_1 | 12 | 3 + test_update_node.test_hash_2 | 13 | 4 + test_update_node.test_hash_1 | 14 | 5 + test_update_node.test_hash_1 | 15 | 6 + test_update_node.test_hash_2 | 16 | 7 + test_update_node.test_hash_0 | 17 | 8 + test_update_node.test_hash_1 | 18 | 9 + test_update_node.test_hash_0 | 19 | 10 +(10 rows) + +/* Move all rows into single partition */ +UPDATE test_update_node.test_hash SET val = 1; +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_hash +WHERE val = 1 +ORDER BY comment; + tableoid | val | comment +------------------------------+-----+--------- + test_update_node.test_hash_2 | 1 | 1 + test_update_node.test_hash_2 | 1 | 10 + 
test_update_node.test_hash_2 | 1 | 2 + test_update_node.test_hash_2 | 1 | 3 + test_update_node.test_hash_2 | 1 | 4 + test_update_node.test_hash_2 | 1 | 5 + test_update_node.test_hash_2 | 1 | 6 + test_update_node.test_hash_2 | 1 | 7 + test_update_node.test_hash_2 | 1 | 8 + test_update_node.test_hash_2 | 1 | 9 +(10 rows) + +SELECT count(*) FROM test_update_node.test_hash; + count +------- + 10 +(1 row) + +/* Don't move any rows */ +UPDATE test_update_node.test_hash SET val = 3 WHERE val = 2; +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_hash +WHERE val = 3 +ORDER BY comment; + tableoid | val | comment +----------+-----+--------- +(0 rows) + +SELECT count(*) FROM test_update_node.test_hash; + count +------- + 10 +(1 row) + +DROP TABLE test_update_node.test_hash CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE test_update_node.test_range CASCADE; +NOTICE: drop cascades to 12 other objects +DROP SCHEMA test_update_node; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_update_trigger.out b/expected/pathman_update_trigger.out deleted file mode 100644 index fdc5438a..00000000 --- a/expected/pathman_update_trigger.out +++ /dev/null @@ -1,289 +0,0 @@ -\set VERBOSITY terse -SET search_path = 'public'; -CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; -/* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); - create_range_partitions -------------------------- - 10 -(1 row) - -SELECT create_update_triggers('test_update_trigger.test_range'); - create_update_triggers ------------------------- - -(1 row) - -/* Update values in 1st partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; -/* Check values #1 */ -SELECT 
tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val < 10 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_1 | 5 | 1 - test_update_trigger.test_range_1 | 5 | 10 - test_update_trigger.test_range_1 | 5 | 2 - test_update_trigger.test_range_1 | 5 | 3 - test_update_trigger.test_range_1 | 5 | 4 - test_update_trigger.test_range_1 | 5 | 5 - test_update_trigger.test_range_1 | 5 | 6 - test_update_trigger.test_range_1 | 5 | 7 - test_update_trigger.test_range_1 | 5 | 8 - test_update_trigger.test_range_1 | 5 | 9 -(10 rows) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Update values in 2nd partition (rows move to 3rd partition) */ -UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val > 20 AND val <= 30 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_3 | 21 | 11 - test_update_trigger.test_range_3 | 22 | 12 - test_update_trigger.test_range_3 | 23 | 13 - test_update_trigger.test_range_3 | 24 | 14 - test_update_trigger.test_range_3 | 25 | 15 - test_update_trigger.test_range_3 | 26 | 16 - test_update_trigger.test_range_3 | 27 | 17 - test_update_trigger.test_range_3 | 28 | 18 - test_update_trigger.test_range_3 | 29 | 19 - test_update_trigger.test_range_3 | 30 | 20 - test_update_trigger.test_range_3 | 21 | 21 - test_update_trigger.test_range_3 | 22 | 22 - test_update_trigger.test_range_3 | 23 | 23 - test_update_trigger.test_range_3 | 24 | 24 - test_update_trigger.test_range_3 | 25 | 25 - test_update_trigger.test_range_3 | 26 | 26 - test_update_trigger.test_range_3 | 27 | 27 - test_update_trigger.test_range_3 | 28 | 28 - test_update_trigger.test_range_3 | 29 | 29 - test_update_trigger.test_range_3 | 30 | 30 -(20 rows) - -SELECT 
count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Move single row */ -UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; -/* Check values #3 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 90 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_9 | 90 | 80 - test_update_trigger.test_range_9 | 90 | 90 -(2 rows) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Move single row (create new partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; -/* Check values #4 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = -1 -ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_11 | -1 | 50 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' WHERE val = 100; -/* Check values #5 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 100 -ORDER BY comment; - tableoid | val | comment ------------------------------------+-----+--------- - test_update_trigger.test_range_10 | 100 | test! 
-(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 100 -(1 row) - -/* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; -ERROR: cannot spawn a partition -/* Check values #6 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 70 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 70 | 70 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; -/* Check values #7 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 65 -ORDER BY comment; - tableoid | val | comment -----------------------------------+-----+--------- - test_update_trigger.test_range_7 | 65 | 65 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 'test_update_trigger.test_range_inv', - 101::NUMERIC, 111::NUMERIC); - attach_range_partition ------------------------------------- - test_update_trigger.test_range_inv -(1 row) - -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; -/* Check values #8 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 105 -ORDER BY comment; - tableoid | val | comment -------------------------------------+-----+--------- - test_update_trigger.test_range_inv | 105 | 60 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment 
CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); - append_range_partition ------------------------------------ - test_update_trigger.test_range_12 -(1 row) - -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; -/* Check values #9 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 115; - tableoid | val ------------------------------------+----- - test_update_trigger.test_range_12 | 115 -(1 row) - -SELECT count(*) FROM test_update_trigger.test_range; - count -------- - 90 -(1 row) - -/* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; -SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); - create_hash_partitions ------------------------- - 3 -(1 row) - -SELECT create_update_triggers('test_update_trigger.test_hash'); - create_update_triggers ------------------------- - -(1 row) - -/* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 1 -ORDER BY comment; - tableoid | val | comment ----------------------------------+-----+--------- - test_update_trigger.test_hash_2 | 1 | 1 - test_update_trigger.test_hash_2 | 1 | 10 - test_update_trigger.test_hash_2 | 1 | 2 - test_update_trigger.test_hash_2 | 1 | 3 - test_update_trigger.test_hash_2 | 1 | 4 - test_update_trigger.test_hash_2 | 1 | 5 - test_update_trigger.test_hash_2 | 1 | 6 - test_update_trigger.test_hash_2 | 1 | 7 - test_update_trigger.test_hash_2 | 1 | 8 - test_update_trigger.test_hash_2 | 1 | 9 -(10 rows) - -SELECT count(*) FROM test_update_trigger.test_hash; - count -------- - 10 -(1 row) - -/* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM 
test_update_trigger.test_hash -WHERE val = 3 -ORDER BY comment; - tableoid | val | comment -----------+-----+--------- -(0 rows) - -SELECT count(*) FROM test_update_trigger.test_hash; - count -------- - 10 -(1 row) - -DROP SCHEMA test_update_trigger CASCADE; -NOTICE: drop cascades to 18 other objects -DROP EXTENSION pg_pathman; diff --git a/expected/pathman_update_triggers.out b/expected/pathman_update_triggers.out new file mode 100644 index 00000000..40c6a19c --- /dev/null +++ b/expected/pathman_update_triggers.out @@ -0,0 +1,191 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + create_hash_partitions +------------------------ + 2 +(1 row) + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; +/* + * Statement level triggers + */ +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert 
ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) 
+update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +select count(distinct val) from test_update_triggers.test; + count +------- + 200 +(1 row) + +truncate test_update_triggers.test; +/* + * Row level triggers + */ +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute 
procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ +insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 2 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 3 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: AFTER UPDATE ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 4 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 5 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = 
val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 6 | test_update_triggers.test_1 +(1 row) + +select count(distinct val) from test_update_triggers.test; + count +------- + 1 +(1 row) + +DROP TABLE test_update_triggers.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_update_triggers_1.out b/expected/pathman_update_triggers_1.out new file mode 100644 index 00000000..5d26ac1e --- /dev/null +++ b/expected/pathman_update_triggers_1.out @@ -0,0 +1,198 @@ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + create_hash_partitions +------------------------ + 2 +(1 row) + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; +/* + * Statement level triggers + */ +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); 
+create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +/* multiple values */ +insert into test_update_triggers.test select generate_series(1, 200); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT 
(test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +update test_update_triggers.test set val = val + 1; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER UPDATE STATEMENT (test) +select count(distinct val) from test_update_triggers.test; + count +------- + 200 +(1 row) + +truncate test_update_triggers.test; +/* + * Row level triggers + */ +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete 
ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +/* single value */ +insert into test_update_triggers.test values (1); +NOTICE: BEFORE INSERT STATEMENT (test) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 2 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: BEFORE DELETE ROW (test_1) +NOTICE: BEFORE INSERT ROW (test_2) +NOTICE: AFTER DELETE ROW (test_1) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER INSERT ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 3 | test_update_triggers.test_2 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: AFTER UPDATE ROW (test_2) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 4 | test_update_triggers.test_2 
+(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_2) +NOTICE: BEFORE DELETE ROW (test_2) +NOTICE: BEFORE INSERT ROW (test_1) +NOTICE: AFTER DELETE ROW (test_2) +NOTICE: AFTER INSERT STATEMENT (test) +NOTICE: AFTER INSERT ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 5 | test_update_triggers.test_1 +(1 row) + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +NOTICE: BEFORE UPDATE STATEMENT (test) +NOTICE: BEFORE UPDATE ROW (test_1) +NOTICE: AFTER UPDATE ROW (test_1) +NOTICE: AFTER UPDATE STATEMENT (test) + val | tableoid +-----+----------------------------- + 6 | test_update_triggers.test_1 +(1 row) + +select count(distinct val) from test_update_triggers.test; + count +------- + 1 +(1 row) + +DROP TABLE test_update_triggers.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; +DROP EXTENSION pg_pathman CASCADE; diff --git a/expected/pathman_utility_stmt.out b/expected/pathman_utility_stmt.out index 19bad191..1a8b969e 100644 --- a/expected/pathman_utility_stmt.out +++ b/expected/pathman_utility_stmt.out @@ -1,4 +1,5 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; /* * Test COPY @@ -24,70 +25,6 @@ VACUUM FULL copy_stmt_hooking.test_1; VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY TO */ -COPY copy_stmt_hooking.test TO stdout; -1 comment \N \N -2 comment \N \N -3 comment \N \N -4 comment \N \N -5 comment \N \N -6 comment \N \N -7 comment \N \N -8 comment \N \N -9 comment \N \N -10 comment \N \N -11 comment \N \N -12 comment \N \N -13 comment \N \N -14 comment \N \N -15 comment \N \N -16 comment \N \N -17 comment \N \N -18 comment \N \N -19 comment \N \N -20 
comment \N \N -\copy copy_stmt_hooking.test to stdout (format csv) -1,comment,, -2,comment,, -3,comment,, -4,comment,, -5,comment,, -6,comment,, -7,comment,, -8,comment,, -9,comment,, -10,comment,, -11,comment,, -12,comment,, -13,comment,, -14,comment,, -15,comment,, -16,comment,, -17,comment,, -18,comment,, -19,comment,, -20,comment,, -\copy copy_stmt_hooking.test(comment) to stdout -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment -comment /* DELETE ROWS, COPY FROM */ DELETE FROM copy_stmt_hooking.test; COPY copy_stmt_hooking.test FROM stdin; @@ -113,32 +50,30 @@ VACUUM FULL copy_stmt_hooking.test_1; VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY FROM (specified columns) */ -COPY copy_stmt_hooking.test (val) TO stdout; -1 -6 -7 -11 -16 -COPY copy_stmt_hooking.test (val, comment) TO stdout; -1 test_1 -6 test_2 -7 test_2 -11 test_3 -16 test_4 -COPY copy_stmt_hooking.test (c3, val, comment) TO stdout; -0 1 test_1 -0 6 test_2 -0 7 test_2 -0 11 test_3 -0 16 test_4 -COPY copy_stmt_hooking.test (val, comment, c3, c4) TO stdout; +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; /* not ok */ +WARNING: COPY TO will only select rows from parent table "test" +COPY copy_stmt_hooking.test (val) TO stdout; /* not ok */ +WARNING: COPY TO will only select rows from parent table "test" +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; +1 test_1 0 0 +6 test_2 0 0 +7 test_2 0 0 +11 test_3 0 0 +16 test_4 0 0 +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout (FORMAT CSV); +1,test_1,0,0 +6,test_2,0,0 +7,test_2,0,0 +11,test_3,0,0 +16,test_4,0,0 +\copy (SELECT * FROM copy_stmt_hooking.test) TO stdout 1 test_1 0 0 6 test_2 0 0 7 test_2 0 0 11 test_3 0 0 16 test_4 0 0 -/* COPY TO (partition does not exist, NOT allowed to create partitions) */ +/* COPY FROM (partition does not 
exist, NOT allowed to create partitions) */ SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test FROM stdin; ERROR: no suitable partition for key '21' @@ -147,7 +82,7 @@ SELECT * FROM copy_stmt_hooking.test WHERE val > 20; -----+---------+----+---- (0 rows) -/* COPY TO (partition does not exist, allowed to create partitions) */ +/* COPY FROM (partition does not exist, allowed to create partitions) */ SET pg_pathman.enable_auto_partition = ON; COPY copy_stmt_hooking.test FROM stdin; SELECT * FROM copy_stmt_hooking.test WHERE val > 20; @@ -194,8 +129,8 @@ WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test_6'::REGCLASS; 3 (1 row) -/* COPY FROM (test transformed tuples) */ -COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; +/* test transformed tuples */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; 1 0 0 6 0 0 7 0 0 @@ -203,9 +138,9 @@ COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; 16 0 0 21 0 0 26 1 2 -/* COPY TO (insert into table with dropped column) */ +/* COPY FROM (insert into table with dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; -/* COPY TO (insert into table without dropped column) */ +/* COPY FROM (insert into table without dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; /* check tuples from last partition (without dropped column) */ SELECT *, tableoid::REGCLASS FROM copy_stmt_hooking.test ORDER BY val; @@ -279,12 +214,67 @@ SELECT COUNT(*) FROM copy_stmt_hooking.test2; 1 (1 row) -DROP SCHEMA copy_stmt_hooking CASCADE; -NOTICE: drop cascades to 797 other objects +DROP TABLE copy_stmt_hooking.test CASCADE; +NOTICE: drop cascades to 5 other objects +DROP TABLE copy_stmt_hooking.test2 CASCADE; +NOTICE: drop cascades to 790 other objects +DROP SCHEMA copy_stmt_hooking; /* * Test auto check constraint renaming */ CREATE SCHEMA rename; +/* + * Check that auto naming sequence is renamed + */ +CREATE TABLE rename.parent(id int not null); +SELECT 
create_range_partitions('rename.parent', 'id', 1, 2, 2); + create_range_partitions +------------------------- + 2 +(1 row) + +SELECT 'rename.parent'::regclass; /* parent is OK */ + regclass +--------------- + rename.parent +(1 row) + +SELECT 'rename.parent_seq'::regclass; /* sequence is OK */ + regclass +------------------- + rename.parent_seq +(1 row) + +ALTER TABLE rename.parent RENAME TO parent_renamed; +SELECT 'rename.parent_renamed'::regclass; /* parent is OK */ + regclass +----------------------- + rename.parent_renamed +(1 row) + +SELECT 'rename.parent_renamed_seq'::regclass; /* sequence is OK */ + regclass +--------------------------- + rename.parent_renamed_seq +(1 row) + +SELECT append_range_partition('rename.parent_renamed'); /* can append */ + append_range_partition +------------------------- + rename.parent_renamed_3 +(1 row) + +DROP SEQUENCE rename.parent_renamed_seq; +ALTER TABLE rename.parent_renamed RENAME TO parent; +SELECT 'rename.parent'::regclass; /* parent is OK */ + regclass +--------------- + rename.parent +(1 row) + +/* + * Check that partitioning constraints are renamed + */ CREATE TABLE rename.test(a serial, b int); SELECT create_hash_partitions('rename.test', 'a', 3); create_hash_partitions @@ -336,7 +326,9 @@ WHERE r.conrelid = 'rename.test_inh_one'::regclass AND r.contype = 'c'; pathman_test_inh_1_check | CHECK (a < 100) (1 row) -/* Check that plain tables are not affected too */ +/* + * Check that plain tables are not affected too + */ CREATE TABLE rename.plain_test(a serial, b int); ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; SELECT add_constraint('rename.plain_test_renamed'); @@ -364,6 +356,93 @@ WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; pathman_plain_test_renamed_check | CHECK (a < 100) (1 row) -DROP SCHEMA rename CASCADE; -NOTICE: drop cascades to 7 other objects +DROP TABLE rename.plain_test CASCADE; +DROP TABLE rename.test_inh CASCADE; +NOTICE: drop cascades to table 
rename.test_inh_one +DROP TABLE rename.parent CASCADE; +NOTICE: drop cascades to 3 other objects +DROP TABLE rename.test CASCADE; +NOTICE: drop cascades to 3 other objects +DROP FUNCTION add_constraint(regclass); +DROP SCHEMA rename; +/* + * Test DROP INDEX CONCURRENTLY (test snapshots) + */ +CREATE SCHEMA drop_index; +CREATE TABLE drop_index.test (val INT4 NOT NULL); +CREATE INDEX ON drop_index.test (val); +SELECT create_hash_partitions('drop_index.test', 'val', 2); + create_hash_partitions +------------------------ + 2 +(1 row) + +DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; +DROP TABLE drop_index.test CASCADE; +NOTICE: drop cascades to 2 other objects +DROP SCHEMA drop_index; +/* + * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla + */ +CREATE SCHEMA test_nonexistance; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; +NOTICE: relation "nonexistent_table" does not exist, skipping +/* renaming existent tables already tested earlier (see rename.plain_test) */ +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN j INT4; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN j INT4; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +--------- + i + j +(2 rows) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table DROP COLUMN IF EXISTS i; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS i; +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS nonexistent_column; +NOTICE: column "nonexistent_column" of relation 
"existent_table" does not exist, skipping +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +------------------------------ + ........pg.dropped.1........ +(1 row) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME COLUMN i TO j; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME COLUMN i TO j; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; + attname +--------- + j +(1 row) + +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME CONSTRAINT baz TO bar; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4 CONSTRAINT existent_table_i_check CHECK (i < 100)); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME CONSTRAINT existent_table_i_check TO existent_table_i_other_check; +DROP TABLE test_nonexistance.existent_table; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET SCHEMA nonexistent_schema; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_schema; +ERROR: schema "nonexistent_schema" does not exist +CREATE SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; +DROP TABLE test_nonexistance2.existent_table; +DROP SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; +NOTICE: relation "nonexistent_table" does not exist, skipping +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS 
test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; +ERROR: tablespace "nonexistent_tablespace" does not exist +DROP TABLE test_nonexistance.existent_table; +DROP SCHEMA test_nonexistance; DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views.out b/expected/pathman_views.out new file mode 100644 index 00000000..64b8425d --- /dev/null +++ b/expected/pathman_views.out @@ -0,0 +1,194 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + 
QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------------- + LockRows + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(4 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from 
views.abc_union where id = 5; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_1.out b/expected/pathman_views_1.out new file mode 100644 index 00000000..e6bb45f5 --- /dev/null +++ b/expected/pathman_views_1.out @@ -0,0 +1,250 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------------- + LockRows + -> Append + -> Seq Scan on _abc + Filter: (id = 1) + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_2 + Filter: (id = 1) + -> Seq Scan on _abc_3 + Filter: (id = 1) + -> Seq Scan on _abc_4 + Filter: (id = 1) + -> Seq Scan on _abc_5 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 1) + -> Seq Scan on _abc_7 + Filter: (id = 1) + -> Seq Scan on _abc_8 + Filter: (id = 1) + -> Seq Scan on _abc_9 + Filter: (id = 1) +(24 rows) + 
+select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +---------------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_4 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_5 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_6 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_7 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_8 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_9 + Filter: ((id = 1) OR (id = 2)) +(25 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +---------------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_0 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_1 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_2 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_3 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_4 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_5 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_6 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_7 + Filter: ((id = 1) OR (id = 2)) + -> Seq Scan on _abc_8 + Filter: ((id = 1) OR (id = 
2)) + -> Seq Scan on _abc_9 + Filter: ((id = 1) OR (id = 2)) +(25 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_2.out b/expected/pathman_views_2.out new file mode 100644 index 00000000..45ea3eb4 --- /dev/null +++ 
b/expected/pathman_views_2.out @@ -0,0 +1,191 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. + */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +-------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +-------------------- + Seq Scan on _abc_0 + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +-------------------------- + LockRows + -> Seq Scan on _abc_0 + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; 
+ count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +-------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 + Filter: (id = 1) + -> Seq Scan on _abc_6 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +-------------------------------------- + HashAggregate + Group Key: _abc_0.id + -> Append + -> Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +---------------------------------- + HashAggregate + Group Key: _abc_8.id + -> Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(7 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_0 + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan 
on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on _abc_8 + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_3.out b/expected/pathman_views_3.out new file mode 100644 index 00000000..ae50bcb3 --- /dev/null +++ b/expected/pathman_views_3.out @@ -0,0 +1,192 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +------------------------- + Seq Scan on _abc_0 _abc + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +------------------------------- + LockRows + -> Seq Scan on _abc_0 _abc + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain 
(costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +---------------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +------------------------------------------- + Unique + -> Sort + Sort Key: _abc.id + -> Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(8 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_0 _abc + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq 
Scan on _abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/pathman_views_4.out b/expected/pathman_views_4.out new file mode 100644 index 00000000..8fde5770 --- /dev/null +++ b/expected/pathman_views_4.out @@ -0,0 +1,191 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
+ */ +\set VERBOSITY terse +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); + create_hash_partitions +------------------------ + 10 +(1 row) + +insert into views._abc select generate_series(1, 100); +/* create a dummy table */ +create table views._abc_add (like views._abc); +vacuum analyze; +/* create a facade view */ +create view views.abc as select * from views._abc; +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); +/* Test SELECT */ +explain (costs off) select * from views.abc; + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 +(11 rows) + +explain (costs off) select * from views.abc where id = 1; + QUERY PLAN +------------------------- + Seq Scan on _abc_0 _abc + Filter: (id = 1) +(2 rows) + +explain (costs off) select * from views.abc where id = 1 for update; + QUERY PLAN +------------------------------- + LockRows + -> Seq Scan on _abc_0 _abc + Filter: (id = 1) +(3 rows) + +select * from views.abc where id = 1 for update; + id +---- + 1 +(1 row) + +select count (*) from views.abc; + count +------- + 100 +(1 row) + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); + QUERY PLAN +--------------- + Insert on abc + -> Result +(2 rows) + +insert into views.abc values (1); +ERROR: INSERT +/* Test UPDATE */ +explain 
(costs off) update views.abc set id = 2 where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Update on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +update views.abc set id = 2 where id = 1 or id = 2; +ERROR: UPDATE +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; + QUERY PLAN +--------------------------------------------- + Delete on abc + -> Result + -> Append + -> Seq Scan on _abc_0 _abc_1 + Filter: (id = 1) + -> Seq Scan on _abc_6 _abc_2 + Filter: (id = 2) +(7 rows) + +delete from views.abc where id = 1 or id = 2; +ERROR: DELETE +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; + QUERY PLAN +---------------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Append + -> Seq Scan on _abc_0 _abc_1 + -> Seq Scan on _abc_1 _abc_2 + -> Seq Scan on _abc_2 _abc_3 + -> Seq Scan on _abc_3 _abc_4 + -> Seq Scan on _abc_4 _abc_5 + -> Seq Scan on _abc_5 _abc_6 + -> Seq Scan on _abc_6 _abc_7 + -> Seq Scan on _abc_7 _abc_8 + -> Seq Scan on _abc_8 _abc_9 + -> Seq Scan on _abc_9 _abc_10 + -> Seq Scan on _abc_add +(15 rows) + +explain (costs off) select * from views.abc_union where id = 5; + QUERY PLAN +------------------------------------- + HashAggregate + Group Key: _abc.id + -> Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(7 rows) + +explain (costs off) table views.abc_union_all; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_0 _abc + -> Seq Scan on _abc_1 + -> Seq Scan on _abc_2 + -> Seq Scan on _abc_3 + -> Seq Scan on _abc_4 + -> Seq Scan on _abc_5 + -> Seq Scan on _abc_6 + -> Seq Scan on _abc_7 + -> Seq Scan on 
_abc_8 + -> Seq Scan on _abc_9 + -> Seq Scan on _abc_add +(12 rows) + +explain (costs off) select * from views.abc_union_all where id = 5; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on _abc_8 _abc + Filter: (id = 5) + -> Seq Scan on _abc_add + Filter: (id = 5) +(5 rows) + +DROP TABLE views._abc CASCADE; +NOTICE: drop cascades to 13 other objects +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/expected/rollback_on_create_partitions.out b/expected/rollback_on_create_partitions.out index 3531107d..ee0c7c0f 100644 --- a/expected/rollback_on_create_partitions.out +++ b/expected/rollback_on_create_partitions.out @@ -5,64 +5,72 @@ step begin: BEGIN; step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; 
+parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data create_partitions show_rel commit show_rel step begin: BEGIN; step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + starting permutation: begin 
insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback show_rel step begin: BEGIN; @@ -70,23 +78,39 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c commit show_rel step begin: BEGIN; @@ -94,23 +118,39 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- 
+ 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions savepoint_c rollback_b show_rel rollback show_rel step begin: BEGIN; @@ -118,34 +158,50 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from 
range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions savepoint_c rollback_b show_rel commit show_rel step begin: BEGIN; @@ -153,44 +209,60 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from 
range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 
+range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_a show_rel rollback show_rel step begin: BEGIN; @@ -198,28 +270,45 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_a: ROLLBACK TO SAVEPOINT a; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition 
+------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_a show_rel commit show_rel step begin: BEGIN; @@ -227,28 +316,45 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_a: ROLLBACK TO SAVEPOINT a; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting 
permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_b drop_partitions show_rel rollback show_rel step begin: BEGIN; @@ -256,32 +362,61 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: 
EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions show_rel savepoint_c rollback_b drop_partitions show_rel commit show_rel step begin: BEGIN; @@ -289,32 +424,61 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step savepoint_c: SAVEPOINT c; step rollback_b: ROLLBACK TO SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 
rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions rollback_a create_partitions show_rel rollback show_rel step begin: BEGIN; @@ -322,37 +486,55 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from 
range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step rollback_a: ROLLBACK TO SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step rollback: ROLLBACK; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent|partition +------+--------- +(0 rows) -Seq Scan on range_rel starting permutation: begin insert_data savepoint_a create_partitions savepoint_b drop_partitions rollback_a create_partitions show_rel commit show_rel step begin: BEGIN; @@ -360,44 +542,62 @@ step insert_data: INSERT INTO range_rel SELECT generate_series(1, 10000); step savepoint_a: SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) -10 step savepoint_b: SAVEPOINT b; +s1: NOTICE: 1000 rows copied from range_rel_1 +s1: NOTICE: 1000 rows copied from range_rel_2 +s1: NOTICE: 1000 rows copied from range_rel_3 +s1: 
NOTICE: 1000 rows copied from range_rel_4 +s1: NOTICE: 1000 rows copied from range_rel_5 +s1: NOTICE: 1000 rows copied from range_rel_6 +s1: NOTICE: 1000 rows copied from range_rel_7 +s1: NOTICE: 1000 rows copied from range_rel_8 +s1: NOTICE: 1000 rows copied from range_rel_9 +s1: NOTICE: 1000 rows copied from range_rel_10 step drop_partitions: SELECT drop_partitions('range_rel'); drop_partitions +--------------- + 10 +(1 row) -10 step rollback_a: ROLLBACK TO SAVEPOINT a; step create_partitions: SELECT create_range_partitions('range_rel', 'id', 1, 1000); create_range_partitions +----------------------- + 10 +(1 row) + +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 +range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) -10 -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 step commit: COMMIT; -step show_rel: EXPLAIN (COSTS OFF) SELECT * FROM range_rel; -QUERY PLAN - -Append - -> Seq Scan on range_rel_1 - -> Seq Scan on range_rel_2 - -> Seq Scan on range_rel_3 - -> Seq Scan on range_rel_4 - -> Seq Scan on range_rel_5 - -> Seq Scan on range_rel_6 - -> Seq Scan on range_rel_7 - -> Seq Scan on range_rel_8 - -> Seq Scan on range_rel_9 - -> Seq Scan on range_rel_10 +step show_rel: SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; +parent |partition +---------+------------ +range_rel|range_rel_1 
+range_rel|range_rel_2 +range_rel|range_rel_3 +range_rel|range_rel_4 +range_rel|range_rel_5 +range_rel|range_rel_6 +range_rel|range_rel_7 +range_rel|range_rel_8 +range_rel|range_rel_9 +range_rel|range_rel_10 +(10 rows) + diff --git a/expected/test_variants.sh b/expected/test_variants.sh new file mode 100755 index 00000000..46bf2817 --- /dev/null +++ b/expected/test_variants.sh @@ -0,0 +1,27 @@ +#!/usr/bin/bash + +ret=0 + +red="\033[0;31m" +reset='\033[0m' + +shopt -s extglob + +for result in ./*_+([0-9]).out; do + f1="$result" + f2="${f1//_+([0-9])/}" + + printf "examine $(basename $f1) \n" + + file_diff=$(diff $f1 $f2 | wc -l) + + if [ $file_diff -eq 0 ]; then + printf $red + printf "WARNING: $(basename $f1) is redundant \n" >&2 + printf $reset + + ret=1 # change exit code + fi +done + +exit $ret diff --git a/hash.sql b/hash.sql index 6bfd77a5..b22fd75e 100644 --- a/hash.sql +++ b/hash.sql @@ -3,7 +3,7 @@ * hash.sql * HASH partitioning functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -11,7 +11,7 @@ /* * Creates hash partitions for specified relation */ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( +CREATE FUNCTION @extschema@.create_hash_partitions( parent_relid REGCLASS, expression TEXT, partitions_count INT4, @@ -20,7 +20,6 @@ CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions( tablespaces TEXT[] DEFAULT NULL) RETURNS INTEGER AS $$ BEGIN - expression := lower(expression); PERFORM @extschema@.prepare_for_partitioning(parent_relid, expression, partition_data); @@ -54,7 +53,7 @@ SET client_min_messages = WARNING; * * lock_parent - should we take an exclusive lock? 
*/ -CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( +CREATE FUNCTION @extschema@.replace_hash_partition( old_partition REGCLASS, new_partition REGCLASS, lock_parent BOOL DEFAULT TRUE) @@ -95,9 +94,11 @@ BEGIN END IF; /* Check that new partition has an equal structure as parent does */ - IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN + BEGIN + PERFORM @extschema@.is_tuple_convertible(parent_relid, new_partition); + EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; - END IF; + END; /* Check that table is partitioned */ IF @extschema@.get_partition_key(parent_relid) IS NULL THEN @@ -109,18 +110,18 @@ BEGIN /* Fetch definition of old_partition's HASH constraint */ SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint - WHERE conrelid = old_partition AND conname = old_constr_name + WHERE conrelid = old_partition AND pg_catalog.quote_ident(conname) = old_constr_name INTO old_constr_def; /* Detach old partition */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + EXECUTE pg_catalog.format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s DROP CONSTRAINT %s', old_partition, old_constr_name); /* Attach the new one */ - EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + EXECUTE pg_catalog.format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE pg_catalog.format('ALTER TABLE %s ADD CONSTRAINT %s %s', new_partition, @extschema@.build_check_constraint_name(new_partition::REGCLASS), old_constr_def); @@ -145,7 +146,7 @@ $$ LANGUAGE plpgsql; /* * Just create HASH partitions, called by create_hash_partitions(). 
*/ -CREATE OR REPLACE FUNCTION @extschema@.create_hash_partitions_internal( +CREATE FUNCTION @extschema@.create_hash_partitions_internal( parent_relid REGCLASS, attribute TEXT, partitions_count INT4, @@ -157,14 +158,14 @@ LANGUAGE C; /* * Calculates hash for integer value */ -CREATE OR REPLACE FUNCTION @extschema@.get_hash_part_idx(INT4, INT4) +CREATE FUNCTION @extschema@.get_hash_part_idx(INT4, INT4) RETURNS INTEGER AS 'pg_pathman', 'get_hash_part_idx' LANGUAGE C STRICT; /* * Build hash condition for a CHECK CONSTRAINT */ -CREATE OR REPLACE FUNCTION @extschema@.build_hash_condition( +CREATE FUNCTION @extschema@.build_hash_condition( attribute_type REGTYPE, attribute TEXT, partitions_count INT4, diff --git a/init.sql b/init.sql index 181a81a7..123b2a36 100644 --- a/init.sql +++ b/init.sql @@ -3,7 +3,7 @@ * init.sql * Creates config table and provides common utility functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -14,12 +14,11 @@ * to partitioning key. 
The function throws an error if it fails to convert * text to Datum */ -CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( +CREATE FUNCTION @extschema@.validate_interval_value( partrel REGCLASS, expr TEXT, parttype INTEGER, - range_interval TEXT, - cooked_expr TEXT) + range_interval TEXT) RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' LANGUAGE C; @@ -32,12 +31,11 @@ LANGUAGE C; * range_interval - base interval for RANGE partitioning as string * cooked_expr - cooked partitioning expression (parsed & rewritten) */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( +CREATE TABLE @extschema@.pathman_config ( partrel REGCLASS NOT NULL PRIMARY KEY, expr TEXT NOT NULL, parttype INTEGER NOT NULL, range_interval TEXT DEFAULT NULL, - cooked_expr TEXT DEFAULT NULL, /* check for allowed part types */ CONSTRAINT pathman_config_parttype_check CHECK (parttype IN (1, 2)), @@ -47,8 +45,7 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( CHECK (@extschema@.validate_interval_value(partrel, expr, parttype, - range_interval, - cooked_expr)) + range_interval)) ); @@ -58,7 +55,7 @@ CREATE TABLE IF NOT EXISTS @extschema@.pathman_config ( * * NOTE: this function is used in CHECK CONSTRAINT. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.validate_part_callback( +CREATE FUNCTION @extschema@.validate_part_callback( callback REGPROCEDURE, raise_error BOOL DEFAULT TRUE) RETURNS BOOL AS 'pg_pathman', 'validate_part_callback_pl' @@ -73,7 +70,7 @@ LANGUAGE C STRICT; * init_callback - text signature of cb to be executed on partition creation * spawn_using_bgw - use background worker in order to auto create partitions */ -CREATE TABLE IF NOT EXISTS @extschema@.pathman_config_params ( +CREATE TABLE @extschema@.pathman_config_params ( partrel REGCLASS NOT NULL PRIMARY KEY, enable_parent BOOLEAN NOT NULL DEFAULT FALSE, auto BOOLEAN NOT NULL DEFAULT TRUE, @@ -94,7 +91,7 @@ TO public; /* * Check if current user can alter/drop specified relation */ -CREATE OR REPLACE FUNCTION @extschema@.check_security_policy(relation regclass) +CREATE FUNCTION @extschema@.check_security_policy(relation regclass) RETURNS BOOL AS 'pg_pathman', 'check_security_policy' LANGUAGE C STRICT; /* @@ -114,9 +111,9 @@ ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; ALTER TABLE @extschema@.pathman_config_params ENABLE ROW LEVEL SECURITY; /* - * Invalidate relcache every time someone changes parameters config. + * Invalidate relcache every time someone changes parameters config or pathman_config */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_config_params_trigger_func() +CREATE FUNCTION @extschema@.pathman_config_params_trigger_func() RETURNS TRIGGER AS 'pg_pathman', 'pathman_config_params_trigger_func' LANGUAGE C; @@ -124,6 +121,10 @@ CREATE TRIGGER pathman_config_params_trigger AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config_params FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); +CREATE TRIGGER pathman_config_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + /* * Enable dump of config tables with pg_dump. 
*/ @@ -134,13 +135,13 @@ SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config_params', /* * Add a row describing the optional parameter to pathman_config_params. */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_set_param( +CREATE FUNCTION @extschema@.pathman_set_param( relation REGCLASS, param TEXT, value ANYELEMENT) RETURNS VOID AS $$ BEGIN - EXECUTE format('INSERT INTO @extschema@.pathman_config_params + EXECUTE pg_catalog.format('INSERT INTO @extschema@.pathman_config_params (partrel, %1$s) VALUES ($1, $2) ON CONFLICT (partrel) DO UPDATE SET %1$s = $2', param) USING relation, value; @@ -150,7 +151,7 @@ $$ LANGUAGE plpgsql; /* * Include\exclude parent relation in query plan. */ -CREATE OR REPLACE FUNCTION @extschema@.set_enable_parent( +CREATE FUNCTION @extschema@.set_enable_parent( relation REGCLASS, value BOOLEAN) RETURNS VOID AS $$ @@ -162,7 +163,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Enable\disable automatic partition creation. */ -CREATE OR REPLACE FUNCTION @extschema@.set_auto( +CREATE FUNCTION @extschema@.set_auto( relation REGCLASS, value BOOLEAN) RETURNS VOID AS $$ @@ -174,7 +175,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set partition creation callback */ -CREATE OR REPLACE FUNCTION @extschema@.set_init_callback( +CREATE FUNCTION @extschema@.set_init_callback( relation REGCLASS, callback REGPROCEDURE DEFAULT 0) RETURNS VOID AS $$ @@ -185,10 +186,10 @@ BEGIN /* Fetch schema-qualified name of callback */ IF callback != 0 THEN - SELECT quote_ident(nspname) || '.' || - quote_ident(proname) || '(' || - (SELECT string_agg(x.argtype::REGTYPE::TEXT, ',') - FROM unnest(proargtypes) AS x(argtype)) || + SELECT pg_catalog.quote_ident(nspname) || '.' 
|| + pg_catalog.quote_ident(proname) || '(' || + (SELECT pg_catalog.string_agg(x.argtype::REGTYPE::TEXT, ',') + FROM pg_catalog.unnest(proargtypes) AS x(argtype)) || ')' FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace @@ -203,7 +204,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set 'spawn using BGW' option */ -CREATE OR REPLACE FUNCTION @extschema@.set_spawn_using_bgw( +CREATE FUNCTION @extschema@.set_spawn_using_bgw( relation REGCLASS, value BOOLEAN) RETURNS VOID AS $$ @@ -215,7 +216,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set (or reset) default interval for auto created partitions */ -CREATE OR REPLACE FUNCTION @extschema@.set_interval( +CREATE FUNCTION @extschema@.set_interval( relation REGCLASS, value ANYELEMENT) RETURNS VOID AS $$ @@ -239,7 +240,7 @@ $$ LANGUAGE plpgsql; /* * Show all existing parents and partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.show_partition_list() +CREATE FUNCTION @extschema@.show_partition_list() RETURNS TABLE ( parent REGCLASS, partition REGCLASS, @@ -253,7 +254,7 @@ LANGUAGE C STRICT; /* * View for show_partition_list(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_partition_list +CREATE VIEW @extschema@.pathman_partition_list AS SELECT * FROM @extschema@.show_partition_list(); GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; @@ -261,7 +262,7 @@ GRANT SELECT ON @extschema@.pathman_partition_list TO PUBLIC; /* * Show memory usage of pg_pathman's caches. */ -CREATE OR REPLACE FUNCTION @extschema@.show_cache_stats() +CREATE FUNCTION @extschema@.show_cache_stats() RETURNS TABLE ( context TEXT, size INT8, @@ -273,19 +274,19 @@ LANGUAGE C STRICT; /* * View for show_cache_stats(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_cache_stats +CREATE VIEW @extschema@.pathman_cache_stats AS SELECT * FROM @extschema@.show_cache_stats(); /* * Show all existing concurrent partitioning tasks. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.show_concurrent_part_tasks() +CREATE FUNCTION @extschema@.show_concurrent_part_tasks() RETURNS TABLE ( userid REGROLE, pid INT, dbid OID, relid REGCLASS, - processed INT, + processed INT8, status TEXT) AS 'pg_pathman', 'show_concurrent_part_tasks_internal' LANGUAGE C STRICT; @@ -293,7 +294,7 @@ LANGUAGE C STRICT; /* * View for show_concurrent_part_tasks(). */ -CREATE OR REPLACE VIEW @extschema@.pathman_concurrent_part_tasks +CREATE VIEW @extschema@.pathman_concurrent_part_tasks AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; @@ -301,7 +302,7 @@ GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; /* * Partition table using ConcurrentPartWorker. */ -CREATE OR REPLACE FUNCTION @extschema@.partition_table_concurrently( +CREATE FUNCTION @extschema@.partition_table_concurrently( relation REGCLASS, batch_size INTEGER DEFAULT 1000, sleep_time FLOAT8 DEFAULT 1.0) @@ -311,7 +312,7 @@ LANGUAGE C STRICT; /* * Stop concurrent partitioning task. */ -CREATE OR REPLACE FUNCTION @extschema@.stop_concurrent_part_task( +CREATE FUNCTION @extschema@.stop_concurrent_part_task( relation REGCLASS) RETURNS BOOL AS 'pg_pathman', 'stop_concurrent_part_task' LANGUAGE C STRICT; @@ -320,7 +321,7 @@ LANGUAGE C STRICT; /* * Copy rows to partitions concurrently. 
*/ -CREATE OR REPLACE FUNCTION @extschema@._partition_data_concurrent( +CREATE FUNCTION @extschema@._partition_data_concurrent( relation REGCLASS, p_min ANYELEMENT DEFAULT NULL::text, p_max ANYELEMENT DEFAULT NULL::text, @@ -340,19 +341,19 @@ BEGIN /* Format LIMIT clause if needed */ IF NOT p_limit IS NULL THEN - v_limit_clause := format('LIMIT %s', p_limit); + v_limit_clause := pg_catalog.format('LIMIT %s', p_limit); END IF; /* Format WHERE clause if needed */ IF NOT p_min IS NULL THEN - v_where_clause := format('%1$s >= $1', part_expr); + v_where_clause := pg_catalog.format('%1$s >= $1', part_expr); END IF; IF NOT p_max IS NULL THEN IF NOT p_min IS NULL THEN v_where_clause := v_where_clause || ' AND '; END IF; - v_where_clause := v_where_clause || format('%1$s < $2', part_expr); + v_where_clause := v_where_clause || pg_catalog.format('%1$s < $2', part_expr); END IF; IF v_where_clause != '' THEN @@ -361,12 +362,12 @@ BEGIN /* Lock rows and copy data */ RAISE NOTICE 'Copying data to partitions...'; - EXECUTE format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', + EXECUTE pg_catalog.format('SELECT array(SELECT ctid FROM ONLY %1$s %2$s %3$s FOR UPDATE NOWAIT)', relation, v_where_clause, v_limit_clause) USING p_min, p_max INTO ctids; - EXECUTE format('WITH data AS ( + EXECUTE pg_catalog.format('WITH data AS ( DELETE FROM ONLY %1$s WHERE ctid = ANY($1) RETURNING *) INSERT INTO %1$s SELECT * FROM data', relation) @@ -382,7 +383,7 @@ SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is O /* * Old school way to distribute rows to partitions. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.partition_data( +CREATE FUNCTION @extschema@.partition_data( parent_relid REGCLASS, OUT p_total BIGINT) AS $$ @@ -390,7 +391,7 @@ BEGIN p_total := 0; /* Create partitions and copy rest of the data */ - EXECUTE format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) + EXECUTE pg_catalog.format('WITH part_data AS (DELETE FROM ONLY %1$s RETURNING *) INSERT INTO %1$s SELECT * FROM part_data', parent_relid::TEXT); @@ -404,7 +405,7 @@ SET pg_pathman.enable_partitionfilter = on; /* ensures that PartitionFilter is O /* * Disable pathman partitioning for specified relation. */ -CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( +CREATE FUNCTION @extschema@.disable_pathman_for( parent_relid REGCLASS) RETURNS VOID AS $$ BEGIN @@ -413,16 +414,13 @@ BEGIN /* Delete rows from both config tables */ DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; - - /* Drop triggers on update */ - PERFORM @extschema@.drop_triggers(parent_relid); END $$ LANGUAGE plpgsql STRICT; /* * Check a few things and take locks before partitioning. */ -CREATE OR REPLACE FUNCTION @extschema@.prepare_for_partitioning( +CREATE FUNCTION @extschema@.prepare_for_partitioning( parent_relid REGCLASS, expression TEXT, partition_data BOOLEAN) @@ -457,6 +455,10 @@ BEGIN RAISE EXCEPTION 'table "%" has already been partitioned', parent_relid; END IF; + IF EXISTS (SELECT 1 FROM pg_catalog.pg_inherits WHERE inhparent = parent_relid) THEN + RAISE EXCEPTION 'can''t partition table "%" with existing children', parent_relid; + END IF; + /* Check if there are foreign keys that reference the relation */ FOR constr_name IN (SELECT conname FROM pg_catalog.pg_constraint WHERE confrelid = parent_relid::REGCLASS::OID) @@ -476,7 +478,7 @@ $$ LANGUAGE plpgsql; /* * Returns relname without quotes or something. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_plain_schema_and_relname( +CREATE FUNCTION @extschema@.get_plain_schema_and_relname( cls REGCLASS, OUT schema TEXT, OUT relname TEXT) @@ -492,7 +494,7 @@ $$ LANGUAGE plpgsql STRICT; /* * DDL trigger that removes entry from pathman_config table. */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_ddl_trigger_func() +CREATE FUNCTION @extschema@.pathman_ddl_trigger_func() RETURNS event_trigger AS $$ DECLARE obj RECORD; @@ -503,8 +505,8 @@ BEGIN pg_class_oid = 'pg_catalog.pg_class'::regclass; /* Find relids to remove from config */ - SELECT array_agg(cfg.partrel) INTO relids - FROM pg_event_trigger_dropped_objects() AS events + SELECT pg_catalog.array_agg(cfg.partrel) INTO relids + FROM pg_catalog.pg_event_trigger_dropped_objects() AS events JOIN @extschema@.pathman_config AS cfg ON cfg.partrel::oid = events.objid WHERE events.classid = pg_class_oid AND events.objsubid = 0; @@ -516,78 +518,11 @@ BEGIN END $$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( - parent_relid REGCLASS) -RETURNS TEXT AS $$ -DECLARE - seq_name TEXT; - -BEGIN - seq_name := @extschema@.build_sequence_name(parent_relid); - - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); - EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); - - RETURN seq_name; -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; /* mute NOTICE message */ - -CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( - parent_relid REGCLASS) -RETURNS VOID AS $$ -DECLARE - seq_name TEXT; - -BEGIN - seq_name := @extschema@.build_sequence_name(parent_relid); - - EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); -END -$$ LANGUAGE plpgsql -SET client_min_messages = WARNING; /* mute NOTICE message */ - -/* - * Drop triggers - */ -CREATE OR REPLACE FUNCTION @extschema@.drop_triggers( - parent_relid REGCLASS) -RETURNS VOID AS $$ -DECLARE - triggername TEXT; - relation OID; - -BEGIN - triggername := 
@extschema@.build_update_trigger_name(parent_relid); - - /* Drop trigger for each partition if exists */ - FOR relation IN (SELECT pg_catalog.pg_inherits.inhrelid - FROM pg_catalog.pg_inherits - JOIN pg_catalog.pg_trigger ON inhrelid = tgrelid - WHERE inhparent = parent_relid AND tgname = triggername) - LOOP - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - relation::REGCLASS); - END LOOP; - - /* Drop trigger on parent */ - IF EXISTS (SELECT * FROM pg_catalog.pg_trigger - WHERE tgname = triggername AND tgrelid = parent_relid) - THEN - EXECUTE format('DROP TRIGGER IF EXISTS %s ON %s', - triggername, - parent_relid::TEXT); - END IF; -END -$$ LANGUAGE plpgsql STRICT; - /* * Drop partitions. If delete_data set to TRUE, partitions * will be dropped with all the data. */ -CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( +CREATE FUNCTION @extschema@.drop_partitions( parent_relid REGCLASS, delete_data BOOLEAN DEFAULT FALSE) RETURNS INTEGER AS $$ @@ -608,9 +543,6 @@ BEGIN RAISE EXCEPTION 'table "%" has no partitions', parent_relid::TEXT; END IF; - /* First, drop all triggers */ - PERFORM @extschema@.drop_triggers(parent_relid); - /* Also drop naming sequence */ PERFORM @extschema@.drop_naming_sequence(parent_relid); @@ -620,7 +552,7 @@ BEGIN ORDER BY inhrelid ASC) LOOP IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', + EXECUTE pg_catalog.format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, child::TEXT); GET DIAGNOSTICS rows_count = ROW_COUNT; @@ -639,9 +571,9 @@ BEGIN * DROP TABLE or DROP FOREIGN TABLE. */ IF rel_kind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', child); + EXECUTE pg_catalog.format('DROP FOREIGN TABLE %s', child); ELSE - EXECUTE format('DROP TABLE %s', child); + EXECUTE pg_catalog.format('DROP TABLE %s', child); END IF; part_count := part_count + 1; @@ -660,7 +592,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is /* * Copy all of parent's foreign keys. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.copy_foreign_keys( +CREATE FUNCTION @extschema@.copy_foreign_keys( parent_relid REGCLASS, partition_relid REGCLASS) RETURNS VOID AS $$ @@ -674,7 +606,7 @@ BEGIN FOR conid IN (SELECT oid FROM pg_catalog.pg_constraint WHERE conrelid = parent_relid AND contype = 'f') LOOP - EXECUTE format('ALTER TABLE %s ADD %s', + EXECUTE pg_catalog.format('ALTER TABLE %s ADD %s', partition_relid::TEXT, pg_catalog.pg_get_constraintdef(conid)); END LOOP; @@ -685,7 +617,7 @@ $$ LANGUAGE plpgsql STRICT; /* * Set new relname, schema and tablespace */ -CREATE OR REPLACE FUNCTION @extschema@.alter_partition( +CREATE FUNCTION @extschema@.alter_partition( relation REGCLASS, new_name TEXT, new_schema REGNAMESPACE, @@ -702,55 +634,22 @@ BEGIN /* Alter table name */ IF new_name != orig_name THEN - EXECUTE format('ALTER TABLE %s RENAME TO %s', relation, new_name); + EXECUTE pg_catalog.format('ALTER TABLE %s RENAME TO %s', relation, new_name); END IF; /* Alter table schema */ IF new_schema != orig_schema THEN - EXECUTE format('ALTER TABLE %s SET SCHEMA %s', relation, new_schema); + EXECUTE pg_catalog.format('ALTER TABLE %s SET SCHEMA %s', relation, new_schema); END IF; /* Move to another tablespace */ IF NOT new_tablespace IS NULL THEN - EXECUTE format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); + EXECUTE pg_catalog.format('ALTER TABLE %s SET TABLESPACE %s', relation, new_tablespace); END IF; END $$ LANGUAGE plpgsql; -/* - * Function for UPDATE triggers. - */ -CREATE OR REPLACE FUNCTION @extschema@.pathman_update_trigger_func() -RETURNS TRIGGER AS 'pg_pathman', 'pathman_update_trigger_func' -LANGUAGE C STRICT; - -/* - * Creates UPDATE triggers. - */ -CREATE OR REPLACE FUNCTION @extschema@.create_update_triggers( - parent_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'create_update_triggers' -LANGUAGE C STRICT; - -/* - * Creates single UPDATE trigger. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_update_trigger( - parent_relid REGCLASS, - partition_relid REGCLASS) -RETURNS VOID AS 'pg_pathman', 'create_single_update_trigger' -LANGUAGE C STRICT; - -/* - * Check if relation has pg_pathman's UPDATE trigger. - */ -CREATE OR REPLACE FUNCTION @extschema@.has_update_trigger( - parent_relid REGCLASS) -RETURNS BOOL AS 'pg_pathman', 'has_update_trigger' -LANGUAGE C STRICT; - - /* * Create DDL trigger to call pathman_ddl_trigger_func(). */ @@ -760,48 +659,64 @@ EXECUTE PROCEDURE @extschema@.pathman_ddl_trigger_func(); /* - * Partitioning key. + * Get partitioning key. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key( - relid REGCLASS) +CREATE FUNCTION @extschema@.get_partition_key( + parent_relid REGCLASS) RETURNS TEXT AS $$ - SELECT expr FROM @extschema@.pathman_config WHERE partrel = relid; + SELECT expr + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; $$ LANGUAGE sql STRICT; /* - * Partitioning key type. + * Get partitioning key type. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_key_type( - relid REGCLASS) -RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type' +CREATE FUNCTION @extschema@.get_partition_key_type( + parent_relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' LANGUAGE C STRICT; /* - * Partitioning type. + * Get parsed and analyzed expression. */ -CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( - relid REGCLASS) +CREATE FUNCTION @extschema@.get_partition_cooked_key( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' +LANGUAGE C STRICT; + +/* + * Get partitioning type. 
+ */ +CREATE FUNCTION @extschema@.get_partition_type( + parent_relid REGCLASS) RETURNS INT4 AS $$ - SELECT parttype FROM @extschema@.pathman_config WHERE partrel = relid; + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; $$ LANGUAGE sql STRICT; - /* * Get number of partitions managed by pg_pathman. */ -CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( - parent_relid REGCLASS) -RETURNS INT4 AS 'pg_pathman', 'get_number_of_partitions_pl' -LANGUAGE C STRICT; +CREATE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT pg_catalog.count(*)::INT4 + FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid; +$$ +LANGUAGE sql STRICT; /* * Get parent of pg_pathman's partition. */ -CREATE OR REPLACE FUNCTION @extschema@.get_parent_of_partition( +CREATE FUNCTION @extschema@.get_parent_of_partition( partition_relid REGCLASS) RETURNS REGCLASS AS 'pg_pathman', 'get_parent_of_partition_pl' LANGUAGE C STRICT; @@ -809,7 +724,7 @@ LANGUAGE C STRICT; /* * Extract basic type of a domain. */ -CREATE OR REPLACE FUNCTION @extschema@.get_base_type( +CREATE FUNCTION @extschema@.get_base_type( typid REGTYPE) RETURNS REGTYPE AS 'pg_pathman', 'get_base_type_pl' LANGUAGE C STRICT; @@ -817,7 +732,7 @@ LANGUAGE C STRICT; /* * Return tablespace name for specified relation. */ -CREATE OR REPLACE FUNCTION @extschema@.get_tablespace( +CREATE FUNCTION @extschema@.get_tablespace( relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'get_tablespace_pl' LANGUAGE C STRICT; @@ -826,7 +741,7 @@ LANGUAGE C STRICT; /* * Check that relation exists. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.validate_relname( +CREATE FUNCTION @extschema@.validate_relname( relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'validate_relname' LANGUAGE C; @@ -834,7 +749,7 @@ LANGUAGE C; /* * Check that expression is valid */ -CREATE OR REPLACE FUNCTION @extschema@.validate_expression( +CREATE FUNCTION @extschema@.validate_expression( relid REGCLASS, expression TEXT) RETURNS VOID AS 'pg_pathman', 'validate_expression' @@ -843,7 +758,7 @@ LANGUAGE C; /* * Check if regclass is date or timestamp. */ -CREATE OR REPLACE FUNCTION @extschema@.is_date_type( +CREATE FUNCTION @extschema@.is_date_type( typid REGTYPE) RETURNS BOOLEAN AS 'pg_pathman', 'is_date_type' LANGUAGE C STRICT; @@ -851,7 +766,7 @@ LANGUAGE C STRICT; /* * Check if TYPE supports the specified operator. */ -CREATE OR REPLACE FUNCTION @extschema@.is_operator_supported( +CREATE FUNCTION @extschema@.is_operator_supported( type_oid REGTYPE, opname TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'is_operator_supported' @@ -860,7 +775,7 @@ LANGUAGE C STRICT; /* * Check if tuple from first relation can be converted to fit the second one. */ -CREATE OR REPLACE FUNCTION @extschema@.is_tuple_convertible( +CREATE FUNCTION @extschema@.is_tuple_convertible( relation1 REGCLASS, relation2 REGCLASS) RETURNS BOOL AS 'pg_pathman', 'is_tuple_convertible' @@ -870,44 +785,27 @@ LANGUAGE C STRICT; /* * Build check constraint name for a specified relation's column. */ -CREATE OR REPLACE FUNCTION @extschema@.build_check_constraint_name( +CREATE FUNCTION @extschema@.build_check_constraint_name( partition_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_check_constraint_name' LANGUAGE C STRICT; -/* - * Build UPDATE trigger's name. - */ -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_name( - relid REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_name' -LANGUAGE C STRICT; - -/* - * Buld UPDATE trigger function's name. 
- */ -CREATE OR REPLACE FUNCTION @extschema@.build_update_trigger_func_name( - relid REGCLASS) -RETURNS TEXT AS 'pg_pathman', 'build_update_trigger_func_name' -LANGUAGE C STRICT; - - /* * Add record to pathman_config (RANGE) and validate partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - expression TEXT, - range_interval TEXT) +CREATE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + expression TEXT, + range_interval TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; /* * Add record to pathman_config (HASH) and validate partitions. */ -CREATE OR REPLACE FUNCTION @extschema@.add_to_pathman_config( - parent_relid REGCLASS, - expression TEXT) +CREATE FUNCTION @extschema@.add_to_pathman_config( + parent_relid REGCLASS, + expression TEXT) RETURNS BOOLEAN AS 'pg_pathman', 'add_to_pathman_config' LANGUAGE C; @@ -916,7 +814,7 @@ LANGUAGE C; * Lock partitioned relation to restrict concurrent * modification of partitioning scheme. */ -CREATE OR REPLACE FUNCTION @extschema@.prevent_part_modification( +CREATE FUNCTION @extschema@.prevent_part_modification( parent_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'prevent_part_modification' LANGUAGE C STRICT; @@ -924,7 +822,7 @@ LANGUAGE C STRICT; /* * Lock relation to restrict concurrent modification of data. */ -CREATE OR REPLACE FUNCTION @extschema@.prevent_data_modification( +CREATE FUNCTION @extschema@.prevent_data_modification( parent_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'prevent_data_modification' LANGUAGE C STRICT; @@ -933,7 +831,7 @@ LANGUAGE C STRICT; /* * Invoke init_callback on RANGE partition. */ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( +CREATE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, partition_relid REGCLASS, init_callback REGPROCEDURE, @@ -945,21 +843,20 @@ LANGUAGE C; /* * Invoke init_callback on HASH partition. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.invoke_on_partition_created_callback( +CREATE FUNCTION @extschema@.invoke_on_partition_created_callback( parent_relid REGCLASS, partition_relid REGCLASS, init_callback REGPROCEDURE) RETURNS VOID AS 'pg_pathman', 'invoke_on_partition_created_callback' LANGUAGE C; - /* * DEBUG: Place this inside some plpgsql fuction and set breakpoint. */ -CREATE OR REPLACE FUNCTION @extschema@.debug_capture() +CREATE FUNCTION @extschema@.debug_capture() RETURNS VOID AS 'pg_pathman', 'debug_capture' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.get_pathman_lib_version() -RETURNS CSTRING AS 'pg_pathman', 'get_pathman_lib_version' +CREATE FUNCTION @extschema@.pathman_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' LANGUAGE C STRICT; diff --git a/mk_dockerfile.sh b/mk_dockerfile.sh new file mode 100755 index 00000000..f15433c4 --- /dev/null +++ b/mk_dockerfile.sh @@ -0,0 +1,16 @@ +if [ -z ${PG_VERSION+x} ]; then + echo PG_VERSION is not set! 
+ exit 1 +fi + +if [ -z ${LEVEL+x} ]; then + LEVEL=scan-build +fi + +echo PG_VERSION=${PG_VERSION} +echo LEVEL=${LEVEL} + +sed \ + -e 's/${PG_VERSION}/'${PG_VERSION}/g \ + -e 's/${LEVEL}/'${LEVEL}/g \ + Dockerfile.tmpl > Dockerfile diff --git a/patches/REL_11_STABLE-pg_pathman-core.diff b/patches/REL_11_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..b3b08e0a --- /dev/null +++ b/patches/REL_11_STABLE-pg_pathman-core.diff @@ -0,0 +1,53 @@ +diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c +index 6384ac940d8..8b4f731e7a8 100644 +--- a/src/backend/jit/llvm/llvmjit_deform.c ++++ b/src/backend/jit/llvm/llvmjit_deform.c +@@ -104,6 +104,10 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, int natts) + + int attnum; + ++ /* don't generate code for tuples without user attributes */ ++ if (desc->natts == 0) ++ return NULL; ++ + mod = llvm_mutable_module(context); + + funcname = llvm_expand_funcname(context, "deform"); +diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c +index 12138e49577..8638ebc4ba1 100644 +--- a/src/backend/jit/llvm/llvmjit_expr.c ++++ b/src/backend/jit/llvm/llvmjit_expr.c +@@ -274,6 +274,7 @@ llvm_compile_expr(ExprState *state) + LLVMValueRef v_slot; + LLVMBasicBlockRef b_fetch; + LLVMValueRef v_nvalid; ++ LLVMValueRef l_jit_deform = NULL; + + b_fetch = l_bb_before_v(opblocks[i + 1], + "op.%d.fetch", i); +@@ -336,17 +337,20 @@ llvm_compile_expr(ExprState *state) + */ + if (desc && (context->base.flags & PGJIT_DEFORM)) + { +- LLVMValueRef params[1]; +- LLVMValueRef l_jit_deform; +- + l_jit_deform = +- slot_compile_deform(context, desc, ++ slot_compile_deform(context, ++ desc, + op->d.fetch.last_var); ++ } ++ ++ if (l_jit_deform) ++ { ++ LLVMValueRef params[1]; ++ + params[0] = v_slot; + + LLVMBuildCall(b, l_jit_deform, + params, lengthof(params), ""); +- + } + else + { diff --git a/patches/REL_14_STABLE-pg_pathman-core.diff 
b/patches/REL_14_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..a6ac1afa --- /dev/null +++ b/patches/REL_14_STABLE-pg_pathman-core.diff @@ -0,0 +1,513 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index f27e458482..ea47c341c1 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -32,6 +32,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index bf551b0395..10d2044ae6 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -76,7 +76,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index bdf59a10fc..972453d9a5 100644 +--- a/src/backend/executor/execExprInterp.c ++++ b/src/backend/executor/execExprInterp.c +@@ -1799,6 +1799,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? 
outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index b3ce4bae53..8f2bb12542 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -824,6 +824,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2713,6 +2720,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_junkFilter = parentestate->es_junkFilter; + rcestate->es_output_cid = parentestate->es_output_cid; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. +diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index 55c430c9ec..21d9e6304a 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -510,7 +510,7 @@ ExecInitInsertProjection(ModifyTableState *mtstate, + * This is also a convenient place to verify that the output of an UPDATE + * matches the target table (ExecBuildUpdateProjection does that). 
+ */ +-static void ++void + ExecInitUpdateProjection(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) + { +@@ -2486,6 +2486,7 @@ ExecModifyTable(PlanState *pstate) + ItemPointerData tuple_ctid; + HeapTupleData oldtupdata; + HeapTuple oldtuple; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -2523,12 +2524,23 @@ ExecModifyTable(PlanState *pstate) + resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex; + subplanstate = outerPlanState(node); + ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; ++ + /* + * Fetch rows from subplan, and execute the required table modification + * for each row. + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contains original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -2562,7 +2574,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? 
++ estate->es_original_tuple : planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + elog(ERROR, "tableoid is NULL"); +@@ -2581,6 +2595,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -2590,6 +2606,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -2619,7 +2636,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -2649,7 +2667,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -2680,8 +2699,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, planSlot); +- slot = ExecInsert(node, resultRelInfo, slot, planSlot, ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, planSlot); ++ slot = ExecInsert(node, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ slot, planSlot, + estate, node->canSetTag); + break; + case CMD_UPDATE: +@@ -2689,6 +2712,13 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -2712,14 +2742,19 @@ ExecModifyTable(PlanState *pstate) + } + slot = ExecGetUpdateNewTuple(resultRelInfo, planSlot, + oldSlot); ++ } + + /* Now apply the update. */ +- slot = ExecUpdate(node, resultRelInfo, tupleid, oldtuple, slot, ++ slot = ExecUpdate(node, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, slot, + planSlot, &node->mt_epqstate, estate, + node->canSetTag); + break; + case CMD_DELETE: +- slot = ExecDelete(node, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(node, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + planSlot, &node->mt_epqstate, estate, + true, /* processReturning */ + node->canSetTag, +@@ -2736,7 +2771,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. 
+ */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -2752,6 +2790,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -2826,6 +2865,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -2922,6 +2962,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -3002,6 +3049,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ExecInitStoredGenerated(resultRelInfo, estate, operation); + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete, there will be a junk attribute + * named "tableoid" present in the subplan's targetlist. 
It will be used +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 381d9e548d..0a4657d291 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 5af78bd0dc..0c13bc9d83 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,7 +53,9 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern bool DefaultXactReadOnly; +-extern bool XactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY ++extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ + extern bool xact_is_sampled; +diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h +index 2b4e104bb9..80d1274efe 100644 +--- a/src/include/catalog/objectaddress.h ++++ b/src/include/catalog/objectaddress.h +@@ -28,7 +28,7 @@ typedef struct ObjectAddress + int32 objectSubId; /* Subitem within object (eg column), or 0 */ + } ObjectAddress; + +-extern const ObjectAddress InvalidObjectAddress; ++extern PGDLLIMPORT const ObjectAddress InvalidObjectAddress; + + #define ObjectAddressSubSet(addr, class_id, object_id, object_sub_id) \ + do { \ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index 3dc03c913e..1002d97499 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -657,5 +657,7 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++extern void ExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git 
a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h +index 4acb1cda6e..fd8d38347d 100644 +--- a/src/include/libpq/libpq-be.h ++++ b/src/include/libpq/libpq-be.h +@@ -327,7 +327,7 @@ extern ssize_t be_gssapi_read(Port *port, void *ptr, size_t len); + extern ssize_t be_gssapi_write(Port *port, void *ptr, size_t len); + #endif /* ENABLE_GSS */ + +-extern ProtocolVersion FrontendProtocol; ++extern PGDLLIMPORT ProtocolVersion FrontendProtocol; + + /* TCP keepalives configuration. These are no-ops on an AF_UNIX socket. */ + +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index ee5ad3c058..dc474819d7 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -592,6 +592,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/include/utils/snapmgr.h b/src/include/utils/snapmgr.h +index 33e6c14e81..abd9bba23e 100644 +--- a/src/include/utils/snapmgr.h ++++ b/src/include/utils/snapmgr.h +@@ -53,7 +53,7 @@ extern TimestampTz GetSnapshotCurrentTimestamp(void); + extern TimestampTz GetOldSnapshotThresholdTimestamp(void); + extern void SnapshotTooOldMagicForTest(void); + +-extern bool FirstSnapshotSet; ++extern PGDLLIMPORT bool FirstSnapshotSet; + + extern PGDLLIMPORT TransactionId TransactionXmin; + extern PGDLLIMPORT TransactionId RecentXmin; +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index de22c9ba2c..c8be5323b8 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,18 @@ my @client_program_files = ( + 'pg_receivewal', 
'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} + sub lcopy + { + my $src = shift; +@@ -608,7 +620,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 9b6539fb15..f8a67c6701 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -41,7 +41,10 @@ my @contrib_uselibpq = + my @contrib_uselibpgport = ('libpq_pipeline', 'oid2name', 'vacuumlo'); + my @contrib_uselibpgcommon = ('libpq_pipeline', 'oid2name', 'vacuumlo'); + my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = { 'dblink' => ['src/backend'] }; ++my $contrib_extraincludes = { ++ 'dblink' => ['src/backend'], ++ 'pg_pathman' => ['contrib/pg_pathman/src/include'] ++}; + my $contrib_extrasource = { + 'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ], + 'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], +@@ -973,6 +976,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + } + elsif ($mf =~ /^MODULES\s*=\s*(.*)$/mg) +@@ -1002,6 +1006,19 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my 
$varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1026,23 +1043,53 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) +- { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ if ( -f "contrib/$n/$d.in" ) { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } else { ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } diff --git a/patches/REL_15_STABLE-pg_pathman-core.diff b/patches/REL_15_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..b8db29fd --- /dev/null +++ b/patches/REL_15_STABLE-pg_pathman-core.diff @@ -0,0 +1,487 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index bbf220407b..9a82a2db04 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -34,6 +34,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index 7a3d9b4b01..0c3d2dec6c 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -78,7 +78,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index 87c7603f2b..9cc0bc0da8 100644 +--- a/src/backend/executor/execExprInterp.c ++++ 
b/src/backend/executor/execExprInterp.c +@@ -1801,6 +1801,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index 0ba61fd547..29d93998b2 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -826,6 +826,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2849,6 +2856,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_junkFilter = parentestate->es_junkFilter; + rcestate->es_output_cid = parentestate->es_output_cid; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. 
+diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index 1ad5dcb406..047508e0da 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -641,6 +641,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, + resultRelInfo->ri_projectNewInfoValid = true; + } + ++void ++PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo) ++{ ++ ExecInitUpdateProjection(mtstate, resultRelInfo); ++} ++ + /* + * ExecGetInsertNewTuple + * This prepares a "new" tuple ready to be inserted into given result +@@ -3581,6 +3588,7 @@ ExecModifyTable(PlanState *pstate) + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ItemPointer tupleid; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -3622,6 +3630,8 @@ ExecModifyTable(PlanState *pstate) + context.mtstate = node; + context.epqstate = &node->mt_epqstate; + context.estate = estate; ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; + + /* + * Fetch rows from subplan, and execute the required table modification +@@ -3629,6 +3639,14 @@ ExecModifyTable(PlanState *pstate) + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contain original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -3662,7 +3680,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? 
++ estate->es_original_tuple : context.planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + { +@@ -3699,6 +3719,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -3708,6 +3730,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -3738,7 +3761,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + +@@ -3786,7 +3810,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -3817,9 +3842,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); +- slot = ExecInsert(&context, resultRelInfo, slot, +- node->canSetTag, NULL, NULL); ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); ++ slot = ExecInsert(&context, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ slot, node->canSetTag, NULL, NULL); + break; + + case CMD_UPDATE: +@@ -3827,6 +3855,13 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -3850,14 +3885,19 @@ ExecModifyTable(PlanState *pstate) + slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, + oldSlot); + context.relaction = NULL; ++ } + + /* Now apply the update. */ +- slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecUpdate(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + slot, node->canSetTag); + break; + + case CMD_DELETE: +- slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + true, false, node->canSetTag, NULL, NULL, NULL); + break; + +@@ -3875,7 +3915,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. 
+ */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -3891,6 +3934,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -3965,6 +4009,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -4067,6 +4112,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -4161,6 +4213,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ExecInitStoredGenerated(resultRelInfo, estate, operation); + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete/merge, there will be a junk + * attribute named "tableoid" present in the subplan's targetlist. 
It +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 1a5d29ac9b..aadca8ea47 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 8d46a781bb..150d70cb64 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern PGDLLIMPORT bool DefaultXactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY + extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index 7cd9b2f2bf..b31a7934a4 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -662,5 +662,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++#define PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION ++/* ++ * This function is static in vanilla, but pg_pathman wants it exported. ++ * We cannot make it extern with the same name to avoid compilation errors ++ * in timescaledb, which ships it's own static copy of the same function. ++ * So, export ExecInitUpdateProjection with Pgpro prefix. ++ * ++ * The define above helps pg_pathman to expect proper exported symbol ++ * from various versions of pgpro. 
++ */ ++extern void PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index 9f176b0e37..a65799dcce 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -624,6 +624,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index 8de79c618c..c9226ba5ad 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,18 @@ my @client_program_files = ( + 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} + sub lcopy + { + my $src = shift; +@@ -609,7 +621,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . 
substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 990c223a9b..cd5048f8d5 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -39,8 +39,8 @@ my $contrib_defines = {}; + my @contrib_uselibpq = (); + my @contrib_uselibpgport = (); + my @contrib_uselibpgcommon = (); +-my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = {}; ++my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; ++my $contrib_extraincludes = { 'pg_pathman' => ['contrib/pg_pathman/src/include'] }; + my $contrib_extrasource = {}; + my @contrib_excludes = ( + 'bool_plperl', 'commit_ts', +@@ -967,6 +967,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + push @projects, $proj; + } +@@ -1070,6 +1071,19 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables { ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1094,23 +1108,53 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) +- { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ if ( -f "contrib/$n/$d.in" ) { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } else { ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } diff --git a/patches/REL_16_STABLE-pg_pathman-core.diff b/patches/REL_16_STABLE-pg_pathman-core.diff new file mode 100644 index 00000000..50dad389 --- /dev/null +++ b/patches/REL_16_STABLE-pg_pathman-core.diff @@ -0,0 +1,547 @@ +diff --git a/contrib/Makefile b/contrib/Makefile +index bbf220407b..9a82a2db04 100644 +--- a/contrib/Makefile ++++ b/contrib/Makefile +@@ -34,6 +34,7 @@ SUBDIRS = \ + passwordcheck \ + pg_buffercache \ + pg_freespacemap \ ++ pg_pathman \ + pg_prewarm \ + pg_stat_statements \ + pg_surgery \ +diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c +index 4a2ea4adba..7cadde5499 100644 +--- a/src/backend/access/transam/xact.c ++++ b/src/backend/access/transam/xact.c +@@ -79,7 +79,7 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED; + int XactIsoLevel = XACT_READ_COMMITTED; + + bool DefaultXactReadOnly = false; +-bool XactReadOnly; ++bool XactReadOnly = false; + + bool DefaultXactDeferrable = false; + bool XactDeferrable; +diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c +index 6b7997465d..5e9e878d3b 100644 +--- 
a/src/backend/executor/execExprInterp.c ++++ b/src/backend/executor/execExprInterp.c +@@ -1845,6 +1845,16 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull) + } + + out: ++ ++ /* ++ * pg_pathman: pass 'tts_tableOid' to result tuple to determine from ++ * which partition the tuple was read ++ */ ++ if (resultslot) ++ { ++ resultslot->tts_tableOid = scanslot ? scanslot->tts_tableOid : ++ (innerslot ? innerslot->tts_tableOid : (outerslot ? outerslot->tts_tableOid : InvalidOid)); ++ } + *isnull = state->resnull; + return state->resvalue; + } +diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c +index 4c5a7bbf62..7d638aa22d 100644 +--- a/src/backend/executor/execMain.c ++++ b/src/backend/executor/execMain.c +@@ -561,6 +561,39 @@ ExecutorRewind(QueryDesc *queryDesc) + } + + ++/* ++ * ExecCheckOneRtePermissions ++ * Check access permissions for one RTE ++ * ++ * Returns true if permissions are adequate. Otherwise, throws an appropriate ++ * error if ereport_on_violation is true, or simply returns false otherwise. 
++ * ++ * This function uses pg_pathman due to commit f75cec4fff, see PGPRO-7792 ++ */ ++bool ++ExecCheckOneRtePermissions(RangeTblEntry *rte, RTEPermissionInfo *perminfo, ++ bool ereport_on_violation) ++{ ++ bool result = true; ++ ++ Assert(OidIsValid(perminfo->relid)); ++ Assert(rte->relid == perminfo->relid); ++ ++ result = ExecCheckOneRelPerms(perminfo); ++ ++ if (!result) ++ { ++ if (ereport_on_violation) ++ aclcheck_error(ACLCHECK_NO_PRIV, ++ get_relkind_objtype(get_rel_relkind(perminfo->relid)), ++ get_rel_name(perminfo->relid)); ++ return false; ++ } ++ ++ return result; ++} ++ ++ + /* + * ExecCheckPermissions + * Check access permissions of relations mentioned in a query +@@ -856,6 +889,13 @@ InitPlan(QueryDesc *queryDesc, int eflags) + + estate->es_plannedstmt = plannedstmt; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ estate->es_result_relation_info = NULL; ++ estate->es_original_tuple = NULL; ++ + /* + * Next, build the ExecRowMark array from the PlanRowMark(s), if any. + */ +@@ -2873,6 +2913,13 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree) + rcestate->es_output_cid = parentestate->es_output_cid; + rcestate->es_queryEnv = parentestate->es_queryEnv; + ++ /* ++ * Fields "es_result_relation_info", "es_original_tuple" are used for ++ * pg_pathman only: ++ */ ++ rcestate->es_result_relation_info = NULL; ++ rcestate->es_original_tuple = NULL; ++ + /* + * ResultRelInfos needed by subplans are initialized from scratch when the + * subplans themselves are initialized. 
+diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c +index c84caeeaee..2a355607e9 100644 +--- a/src/backend/executor/nodeModifyTable.c ++++ b/src/backend/executor/nodeModifyTable.c +@@ -660,6 +660,13 @@ ExecInitUpdateProjection(ModifyTableState *mtstate, + resultRelInfo->ri_projectNewInfoValid = true; + } + ++void ++PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo) ++{ ++ ExecInitUpdateProjection(mtstate, resultRelInfo); ++} ++ + /* + * ExecGetInsertNewTuple + * This prepares a "new" tuple ready to be inserted into given result +@@ -3570,6 +3577,7 @@ ExecModifyTable(PlanState *pstate) + HeapTupleData oldtupdata; + HeapTuple oldtuple; + ItemPointer tupleid; ++ ResultRelInfo *saved_resultRelInfo; + + CHECK_FOR_INTERRUPTS(); + +@@ -3611,6 +3619,8 @@ ExecModifyTable(PlanState *pstate) + context.mtstate = node; + context.epqstate = &node->mt_epqstate; + context.estate = estate; ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = NULL; + + /* + * Fetch rows from subplan, and execute the required table modification +@@ -3618,6 +3628,14 @@ ExecModifyTable(PlanState *pstate) + */ + for (;;) + { ++ /* ++ * "es_original_tuple" should contain original modified tuple (new ++ * values of the changed columns plus row identity information such as ++ * CTID) in case tuple planSlot is replaced in pg_pathman to new value ++ * in call "ExecProcNode(subplanstate)". ++ */ ++ estate->es_original_tuple = NULL; ++ + /* + * Reset the per-output-tuple exprcontext. This is needed because + * triggers expect to use that context as workspace. It's a bit ugly +@@ -3651,7 +3669,9 @@ ExecModifyTable(PlanState *pstate) + bool isNull; + Oid resultoid; + +- datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ? 
++ estate->es_original_tuple : context.planSlot, ++ node->mt_resultOidAttno, + &isNull); + if (isNull) + { +@@ -3688,6 +3708,8 @@ ExecModifyTable(PlanState *pstate) + if (resultRelInfo->ri_usesFdwDirectModify) + { + Assert(resultRelInfo->ri_projectReturning); ++ /* PartitionRouter does not support foreign data wrappers: */ ++ Assert(estate->es_original_tuple == NULL); + + /* + * A scan slot containing the data that was actually inserted, +@@ -3697,6 +3719,7 @@ ExecModifyTable(PlanState *pstate) + */ + slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot); + ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; + } + +@@ -3727,7 +3750,8 @@ ExecModifyTable(PlanState *pstate) + { + /* ri_RowIdAttNo refers to a ctid attribute */ + Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)); +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + +@@ -3775,7 +3799,8 @@ ExecModifyTable(PlanState *pstate) + */ + else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo)) + { +- datum = ExecGetJunkAttribute(slot, ++ datum = ExecGetJunkAttribute(estate->es_original_tuple ++ ? estate->es_original_tuple : slot, + resultRelInfo->ri_RowIdAttNo, + &isNull); + /* shouldn't ever get a null result... */ +@@ -3806,9 +3831,12 @@ ExecModifyTable(PlanState *pstate) + /* Initialize projection info if first time for this table */ + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitInsertProjection(node, resultRelInfo); +- slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); +- slot = ExecInsert(&context, resultRelInfo, slot, +- node->canSetTag, NULL, NULL); ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot); ++ slot = ExecInsert(&context, estate->es_result_relation_info ? 
++ estate->es_result_relation_info : resultRelInfo, ++ slot, node->canSetTag, NULL, NULL); + break; + + case CMD_UPDATE: +@@ -3816,6 +3844,13 @@ ExecModifyTable(PlanState *pstate) + if (unlikely(!resultRelInfo->ri_projectNewInfoValid)) + ExecInitUpdateProjection(node, resultRelInfo); + ++ /* ++ * Do not change the indentation for PostgreSQL code to make it ++ * easier to merge new PostgreSQL changes. ++ */ ++ /* Do nothing in case tuple was modified in pg_pathman: */ ++ if (!estate->es_original_tuple) ++ { + /* + * Make the new tuple by combining plan's output tuple with + * the old tuple being updated. +@@ -3839,14 +3874,19 @@ ExecModifyTable(PlanState *pstate) + slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot, + oldSlot); + context.relaction = NULL; ++ } + + /* Now apply the update. */ +- slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecUpdate(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + slot, node->canSetTag); + break; + + case CMD_DELETE: +- slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, ++ slot = ExecDelete(&context, estate->es_result_relation_info ? ++ estate->es_result_relation_info : resultRelInfo, ++ tupleid, oldtuple, + true, false, node->canSetTag, NULL, NULL, NULL); + break; + +@@ -3864,7 +3904,10 @@ ExecModifyTable(PlanState *pstate) + * the work on next call. 
+ */ + if (slot) ++ { ++ estate->es_result_relation_info = saved_resultRelInfo; + return slot; ++ } + } + + /* +@@ -3880,6 +3923,7 @@ ExecModifyTable(PlanState *pstate) + + node->mt_done = true; + ++ estate->es_result_relation_info = saved_resultRelInfo; + return NULL; + } + +@@ -3954,6 +3998,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + ListCell *l; + int i; + Relation rel; ++ ResultRelInfo *saved_resultRelInfo; + + /* check for unsupported flags */ + Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); +@@ -4056,6 +4101,13 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + i++; + } + ++ /* ++ * pg_pathman: set "estate->es_result_relation_info" value for take it in ++ * functions partition_filter_begin(), partition_router_begin() ++ */ ++ saved_resultRelInfo = estate->es_result_relation_info; ++ estate->es_result_relation_info = mtstate->resultRelInfo; ++ + /* + * Now we may initialize the subplan. + */ +@@ -4138,6 +4190,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) + } + } + ++ estate->es_result_relation_info = saved_resultRelInfo; ++ + /* + * If this is an inherited update/delete/merge, there will be a junk + * attribute named "tableoid" present in the subplan's targetlist. 
It +diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c +index 011ec18015..7b4fcb2807 100644 +--- a/src/backend/utils/init/globals.c ++++ b/src/backend/utils/init/globals.c +@@ -25,7 +25,7 @@ + #include "storage/backendid.h" + + +-ProtocolVersion FrontendProtocol; ++ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; + + volatile sig_atomic_t InterruptPending = false; + volatile sig_atomic_t QueryCancelPending = false; +diff --git a/src/include/access/xact.h b/src/include/access/xact.h +index 7d3b9446e6..20030111f4 100644 +--- a/src/include/access/xact.h ++++ b/src/include/access/xact.h +@@ -53,6 +53,8 @@ extern PGDLLIMPORT int XactIsoLevel; + + /* Xact read-only state */ + extern PGDLLIMPORT bool DefaultXactReadOnly; ++ ++#define PGPRO_PATHMAN_AWARE_COPY + extern PGDLLIMPORT bool XactReadOnly; + + /* flag for logging statements in this transaction */ +diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h +index ac02247947..c39ae13a8e 100644 +--- a/src/include/executor/executor.h ++++ b/src/include/executor/executor.h +@@ -208,6 +208,9 @@ extern void standard_ExecutorFinish(QueryDesc *queryDesc); + extern void ExecutorEnd(QueryDesc *queryDesc); + extern void standard_ExecutorEnd(QueryDesc *queryDesc); + extern void ExecutorRewind(QueryDesc *queryDesc); ++extern bool ExecCheckOneRtePermissions(RangeTblEntry *rte, ++ RTEPermissionInfo *perminfo, ++ bool ereport_on_violation); + extern bool ExecCheckPermissions(List *rangeTable, + List *rteperminfos, bool ereport_on_violation); + extern void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation); +@@ -676,5 +679,17 @@ extern ResultRelInfo *ExecLookupResultRelByOid(ModifyTableState *node, + Oid resultoid, + bool missing_ok, + bool update_cache); ++#define PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION ++/* ++ * This function is static in vanilla, but pg_pathman wants it exported. 
++ * We cannot make it extern with the same name to avoid compilation errors ++ * in timescaledb, which ships it's own static copy of the same function. ++ * So, export ExecInitUpdateProjection with Pgpro prefix. ++ * ++ * The define above helps pg_pathman to expect proper exported symbol ++ * from various versions of pgpro. ++ */ ++extern void PgproExecInitUpdateProjection(ModifyTableState *mtstate, ++ ResultRelInfo *resultRelInfo); + + #endif /* EXECUTOR_H */ +diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h +index 869465d6f8..6bdde351d7 100644 +--- a/src/include/nodes/execnodes.h ++++ b/src/include/nodes/execnodes.h +@@ -638,6 +638,12 @@ typedef struct EState + * es_result_relations in no + * specific order */ + ++ /* These fields was added for compatibility pg_pathman with 14: */ ++ ResultRelInfo *es_result_relation_info; /* currently active array elt */ ++ TupleTableSlot *es_original_tuple; /* original modified tuple (new values ++ * of the changed columns plus row ++ * identity information such as CTID) */ ++ + PartitionDirectory es_partition_directory; /* for PartitionDesc lookup */ + + /* +diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm +index 05548d7c0a..37754370e0 100644 +--- a/src/tools/msvc/Install.pm ++++ b/src/tools/msvc/Install.pm +@@ -30,6 +30,22 @@ my @client_program_files = ( + 'pg_receivewal', 'pg_recvlogical', 'pg_restore', 'psql', + 'reindexdb', 'vacuumdb', @client_contribs); + ++sub SubstituteMakefileVariables ++{ ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) ++ { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) ++ { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub lcopy + { + my $src = shift; +@@ -580,7 +596,7 @@ sub ParseAndCleanRule + substr($flist, 0, index($flist, '$(addsuffix ')) + . 
substr($flist, $i + 1); + } +- return $flist; ++ return SubstituteMakefileVariables($flist, $mf); + } + + sub CopyIncludeFiles +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 6a79a0e037..93696f53ae 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -40,7 +40,7 @@ my @contrib_uselibpq = (); + my @contrib_uselibpgport = (); + my @contrib_uselibpgcommon = (); + my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; +-my $contrib_extraincludes = {}; ++my $contrib_extraincludes = { 'pg_pathman' => ['contrib/pg_pathman/src/include'] }; + my $contrib_extrasource = {}; + my @contrib_excludes = ( + 'bool_plperl', 'commit_ts', +@@ -980,6 +980,7 @@ sub AddContrib + my $dn = $1; + my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n"); + $proj->AddReference($postgres); ++ $proj->RemoveFile("$subdir/$n/src/declarative.c") if $n eq 'pg_pathman'; + AdjustContribProj($proj); + push @projects, $proj; + } +@@ -1083,6 +1084,22 @@ sub AddContrib + return; + } + ++sub SubstituteMakefileVariables ++{ ++ local $_ = shift; # Line to substitue ++ my $mf = shift; # Makefile text ++ while (/\$\((\w+)\)/) ++ { ++ my $varname = $1; ++ if ($mf =~ /^$varname\s*=\s*(.*)$/mg) ++ { ++ my $varvalue=$1; ++ s/\$\($varname\)/$varvalue/g; ++ } ++ } ++ return $_; ++} ++ + sub GenerateContribSqlFiles + { + my $n = shift; +@@ -1107,23 +1124,59 @@ sub GenerateContribSqlFiles + substr($l, 0, index($l, '$(addsuffix ')) . 
substr($l, $i + 1); + } + ++ $l = SubstituteMakefileVariables($l,$mf); + foreach my $d (split /\s+/, $l) + { +- my $in = "$d.in"; +- my $out = "$d"; +- +- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ if ( -f "contrib/$n/$d.in" ) ++ { ++ my $in = "$d.in"; ++ my $out = "$d"; ++ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in")) ++ { ++ print "Building $out from $in (contrib/$n)...\n"; ++ my $cont = Project::read_file("contrib/$n/$in"); ++ my $dn = $out; ++ $dn =~ s/\.sql$//; ++ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; ++ my $o; ++ open($o, '>', "contrib/$n/$out") ++ || croak "Could not write to contrib/$n/$d"; ++ print $o $cont; ++ close($o); ++ } ++ } ++ else + { +- print "Building $out from $in (contrib/$n)...\n"; +- my $cont = Project::read_file("contrib/$n/$in"); +- my $dn = $out; +- $dn =~ s/\.sql$//; +- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g; +- my $o; +- open($o, '>', "contrib/$n/$out") +- || croak "Could not write to contrib/$n/$d"; +- print $o $cont; +- close($o); ++ # Search for makefile rule. 
++ # For now we do not process rule command and assume ++ # that we should just concatenate all prerequisites ++ # ++ my @prereq = (); ++ my $target; ++ my @rules = $mf =~ /^(\S+)\s*:\s*([^=].*)$/mg; ++ RULE: ++ while (@rules) ++ { ++ $target = SubstituteMakefileVariables(shift @rules,$mf); ++ @prereq = split(/\s+/,SubstituteMakefileVariables(shift @rules,$mf)); ++ last RULE if ($target eq $d); ++ @prereq = (); ++ } ++ croak "Don't know how to build contrib/$n/$d" unless @prereq; ++ if (grep(Solution::IsNewer("contrib/$n/$d","contrib/$n/$_"), ++ @prereq)) ++ { ++ print STDERR "building $d from @prereq by concatentation\n"; ++ my $o; ++ open $o, ">contrib/$n/$d" ++ or croak("Couldn't write to contrib/$n/$d:$!"); ++ for my $in (@prereq) ++ { ++ my $data = Project::read_file("contrib/$n/$in"); ++ print $o $data; ++ } ++ close $o; ++ } + } + } + } diff --git a/pg_compat_available.sh b/pg_compat_available.sh new file mode 100755 index 00000000..d2d7cabc --- /dev/null +++ b/pg_compat_available.sh @@ -0,0 +1,6 @@ +#!/usr/bin/bash + +dir=$(dirname $0) +func="$1" + +grep -n -r --include=pg_compat.c --include=pg_compat.h $func $dir | head -n1 diff --git a/pg_pathman--1.4--1.5.sql b/pg_pathman--1.4--1.5.sql new file mode 100644 index 00000000..2aa02bf9 --- /dev/null +++ b/pg_pathman--1.4--1.5.sql @@ -0,0 +1,955 @@ +ALTER TABLE @extschema@.pathman_config DROP CONSTRAINT pathman_config_interval_check; + +DROP FUNCTION @extschema@.validate_interval_value(REGCLASS, TEXT, INTEGER, + TEXT, TEXT); +CREATE OR REPLACE FUNCTION @extschema@.validate_interval_value( + partrel REGCLASS, + expr TEXT, + parttype INTEGER, + range_interval TEXT) +RETURNS BOOL AS 'pg_pathman', 'validate_interval_value' +LANGUAGE C; + +ALTER TABLE @extschema@.pathman_config DROP COLUMN cooked_expr; +/* + * Dropped columns are never actually purged, entry in pg_attribute remains. + * Since dealing with different number of attrs in C code is cumbersome, + * let's recreate table instead. 
+ */ +CREATE TABLE @extschema@.pathman_config_tmp (LIKE @extschema@.pathman_config INCLUDING ALL); +INSERT INTO @extschema@.pathman_config_tmp SELECT * FROM @extschema@.pathman_config; +ALTER EVENT TRIGGER pathman_ddl_trigger DISABLE; +DROP TABLE @extschema@.pathman_config; +ALTER TABLE @extschema@.pathman_config_tmp RENAME TO pathman_config; +ALTER EVENT TRIGGER pathman_ddl_trigger ENABLE; + +/* + * Get back stuff not preserved by CREATE TABLE LIKE: ACL, RLS and + * pg_extension_config_dump mark. + */ + +GRANT SELECT, INSERT, UPDATE, DELETE +ON @extschema@.pathman_config +TO public; + +/* + * Row security policy to restrict partitioning operations to owner and superusers only + */ +CREATE POLICY deny_modification ON @extschema@.pathman_config +FOR ALL USING (check_security_policy(partrel)); +CREATE POLICY allow_select ON @extschema@.pathman_config FOR SELECT USING (true); +ALTER TABLE @extschema@.pathman_config ENABLE ROW LEVEL SECURITY; + +/* + * Enable dump of config tables with pg_dump. + */ +SELECT pg_catalog.pg_extension_config_dump('@extschema@.pathman_config', ''); + + +ALTER TABLE @extschema@.pathman_config ADD CONSTRAINT pathman_config_interval_check + CHECK (@extschema@.validate_interval_value(partrel, + expr, + parttype, + range_interval)); + +CREATE TRIGGER pathman_config_trigger +AFTER INSERT OR UPDATE OR DELETE ON @extschema@.pathman_config +FOR EACH ROW EXECUTE PROCEDURE @extschema@.pathman_config_params_trigger_func(); + +/* + * Get parsed and analyzed expression. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.get_partition_cooked_key( + parent_relid REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_partition_cooked_key_pl' +LANGUAGE C STRICT; + +/* + * Add new partition + */ +CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( + parent_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_name TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF start_value >= end_value THEN + RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; + END IF; + + /* Check range overlap */ + IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN + PERFORM @extschema@.check_range_available(parent_relid, + start_value, + end_value); + END IF; + + /* Create new partition */ + part_name := @extschema@.create_single_range_partition(parent_relid, + start_value, + end_value, + partition_name, + tablespace); + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Append new partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '+') THEN + RAISE EXCEPTION 'type % does not support ''+'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Attach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( + parent_relid REGCLASS, + partition_relid REGCLASS, + start_value ANYELEMENT, + end_value ANYELEMENT) +RETURNS TEXT AS $$ +DECLARE + part_expr TEXT; + part_type INTEGER; + rel_persistence CHAR; + v_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = partition_relid INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + partition_relid::TEXT; + END IF; + + /* Check range 
overlap */ + PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); + + IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + part_expr := @extschema@.get_partition_key(parent_relid); + part_type := @extschema@.get_partition_type(parent_relid); + + IF part_expr IS NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Set inheritance */ + EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + + /* Set check constraint */ + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + partition_relid::TEXT, + @extschema@.build_check_constraint_name(partition_relid), + @extschema@.build_range_condition(partition_relid, + part_expr, + start_value, + end_value)); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO v_init_callback; + + /* Invoke an initialization callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + partition_relid, + v_init_callback, + start_value, + end_value); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + +/* + * Create a naming sequence for partitioned table. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Creates RANGE partitions for specified relation based on datetime attribute + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval INTERVAL, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION '"p_count" must not be less than 0'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + PERFORM 
@extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on numerical expression + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + start_value ANYELEMENT, + p_interval ANYELEMENT, + p_count INTEGER DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + rows_count BIGINT; + max_value start_value%TYPE; + cur_value start_value%TYPE := start_value; + end_value start_value%TYPE; + part_count INTEGER := 0; + i INTEGER; + +BEGIN + PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + IF p_count < 0 THEN + RAISE EXCEPTION 'partitions count must not be less than zero'; + END IF; + + /* Try to determine partitions count if not set */ + IF p_count IS NULL THEN + EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + INTO rows_count, max_value; + + IF rows_count = 0 THEN + RAISE EXCEPTION 'cannot determine partitions count for empty table'; + END IF; + + IF max_value IS NULL THEN + RAISE EXCEPTION 'expression "%" can return NULL values', expression; + END IF; + + 
p_count := 0; + WHILE cur_value <= max_value + LOOP + cur_value := cur_value + p_interval; + p_count := p_count + 1; + END LOOP; + END IF; + + /* + * In case when user doesn't want to automatically create partitions + * and specifies partition count as 0 then do not check boundaries + */ + IF p_count != 0 THEN + /* Compute right bound of partitioning through additions */ + end_value := start_value; + FOR i IN 1..p_count + LOOP + end_value := end_value + p_interval; + END LOOP; + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); + END IF; + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, + p_interval::TEXT); + + IF p_count != 0 THEN + part_count := @extschema@.create_range_partitions_internal( + parent_relid, + @extschema@.generate_range_bounds(start_value, + p_interval, + p_count), + NULL, + NULL); + END IF; + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN p_count; +END +$$ LANGUAGE plpgsql; + +/* + * Creates RANGE partitions for specified relation based on bounds array + */ +CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( + parent_relid REGCLASS, + expression TEXT, + bounds ANYARRAY, + partition_names TEXT[] DEFAULT NULL, + tablespaces TEXT[] DEFAULT NULL, + partition_data BOOLEAN DEFAULT TRUE) +RETURNS INTEGER AS $$ +DECLARE + part_count INTEGER := 0; + +BEGIN + IF array_ndims(bounds) > 1 THEN + RAISE EXCEPTION 'Bounds array must be a one dimensional array'; + END IF; + + IF array_length(bounds, 1) < 2 THEN + RAISE EXCEPTION 'Bounds array must have at least two values'; + END IF; + + 
PERFORM @extschema@.prepare_for_partitioning(parent_relid, + expression, + partition_data); + + /* Check boundaries */ + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + bounds[1], + bounds[array_length(bounds, 1)]); + + /* Create sequence for child partitions names */ + PERFORM @extschema@.create_naming_sequence(parent_relid); + + /* Insert new entry to pathman config */ + PERFORM @extschema@.add_to_pathman_config(parent_relid, expression, NULL); + + /* Create partitions */ + part_count := @extschema@.create_range_partitions_internal(parent_relid, + bounds, + partition_names, + tablespaces); + + /* Relocate data if asked to */ + IF partition_data = true THEN + PERFORM @extschema@.set_enable_parent(parent_relid, false); + PERFORM @extschema@.partition_data(parent_relid); + ELSE + PERFORM @extschema@.set_enable_parent(parent_relid, true); + END IF; + + RETURN part_count; +END +$$ +LANGUAGE plpgsql; + +/* + * Detach range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( + partition_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_type INTEGER; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); + + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Remove inheritance */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', + partition_relid::TEXT, + parent_relid::TEXT); + + /* Remove check constraint */ + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + partition_relid::TEXT, + 
@extschema@.build_check_constraint_name(partition_relid)); + + RETURN partition_relid; +END +$$ LANGUAGE plpgsql; + +/* + * Replace hash partition with another one. It could be useful in case when + * someone wants to attach foreign table as a partition. + * + * lock_parent - should we take an exclusive lock? + */ +CREATE OR REPLACE FUNCTION @extschema@.replace_hash_partition( + old_partition REGCLASS, + new_partition REGCLASS, + lock_parent BOOL DEFAULT TRUE) +RETURNS REGCLASS AS $$ +DECLARE + parent_relid REGCLASS; + old_constr_name TEXT; /* name of old_partition's constraint */ + old_constr_def TEXT; /* definition of old_partition's constraint */ + rel_persistence CHAR; + p_init_callback REGPROCEDURE; + +BEGIN + PERFORM @extschema@.validate_relname(old_partition); + PERFORM @extschema@.validate_relname(new_partition); + + /* Parent relation */ + parent_relid := @extschema@.get_parent_of_partition(old_partition); + + IF lock_parent THEN + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + ELSE + /* Acquire lock on parent */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + END IF; + + /* Acquire data modification lock (prevent further modifications) */ + PERFORM @extschema@.prevent_data_modification(old_partition); + PERFORM @extschema@.prevent_data_modification(new_partition); + + /* Ignore temporary tables */ + SELECT relpersistence FROM pg_catalog.pg_class + WHERE oid = new_partition INTO rel_persistence; + + IF rel_persistence = 't'::CHAR THEN + RAISE EXCEPTION 'temporary table "%" cannot be used as a partition', + new_partition::TEXT; + END IF; + + /* Check that new partition has an equal structure as parent does */ + IF NOT @extschema@.is_tuple_convertible(parent_relid, new_partition) THEN + RAISE EXCEPTION 'partition must have a compatible tuple format'; + END IF; + + /* Check that table is partitioned */ + IF @extschema@.get_partition_key(parent_relid) IS 
NULL THEN + RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; + END IF; + + /* Fetch name of old_partition's HASH constraint */ + old_constr_name = @extschema@.build_check_constraint_name(old_partition::REGCLASS); + + /* Fetch definition of old_partition's HASH constraint */ + SELECT pg_catalog.pg_get_constraintdef(oid) FROM pg_catalog.pg_constraint + WHERE conrelid = old_partition AND quote_ident(conname) = old_constr_name + INTO old_constr_def; + + /* Detach old partition */ + EXECUTE format('ALTER TABLE %s NO INHERIT %s', old_partition, parent_relid); + EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + old_partition, + old_constr_name); + + /* Attach the new one */ + EXECUTE format('ALTER TABLE %s INHERIT %s', new_partition, parent_relid); + EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s %s', + new_partition, + @extschema@.build_check_constraint_name(new_partition::REGCLASS), + old_constr_def); + + /* Fetch init_callback from 'params' table */ + WITH stub_callback(stub) as (values (0)) + SELECT init_callback + FROM stub_callback + LEFT JOIN @extschema@.pathman_config_params AS params + ON params.partrel = parent_relid + INTO p_init_callback; + + /* Finally invoke init_callback */ + PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, + new_partition, + p_init_callback); + + RETURN new_partition; +END +$$ LANGUAGE plpgsql; + +/* + * Disable pathman partitioning for specified relation. + */ +CREATE OR REPLACE FUNCTION @extschema@.disable_pathman_for( + parent_relid REGCLASS) +RETURNS VOID AS $$ +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Delete rows from both config tables */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; +END +$$ LANGUAGE plpgsql STRICT; + +/* + * Drop a naming sequence for partitioned table. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Drop partitions. If delete_data set to TRUE, partitions + * will be dropped with all the data. + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_partitions( + parent_relid REGCLASS, + delete_data BOOLEAN DEFAULT FALSE) +RETURNS INTEGER AS $$ +DECLARE + child REGCLASS; + rows_count BIGINT; + part_count INTEGER := 0; + rel_kind CHAR; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire data modification lock */ + PERFORM @extschema@.prevent_data_modification(parent_relid); + + IF NOT EXISTS (SELECT FROM @extschema@.pathman_config + WHERE partrel = parent_relid) THEN + RAISE EXCEPTION 'table "%" has no partitions', parent_relid::TEXT; + END IF; + + /* Also drop naming sequence */ + PERFORM @extschema@.drop_naming_sequence(parent_relid); + + FOR child IN (SELECT inhrelid::REGCLASS + FROM pg_catalog.pg_inherits + WHERE inhparent::regclass = parent_relid + ORDER BY inhrelid ASC) + LOOP + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + child::TEXT); + GET DIAGNOSTICS rows_count = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', rows_count, child; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = child + INTO rel_kind; + + /* + * Determine the kind of child relation. It can be either a regular + * table (r) or a foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF rel_kind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', child); + ELSE + EXECUTE format('DROP TABLE %s', child); + END IF; + + part_count := part_count + 1; + END LOOP; + + /* Finally delete both config entries */ + DELETE FROM @extschema@.pathman_config WHERE partrel = parent_relid; + DELETE FROM @extschema@.pathman_config_params WHERE partrel = parent_relid; + + RETURN part_count; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +/* + * Drop range partition + */ +CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( + partition_relid REGCLASS, + delete_data BOOLEAN DEFAULT TRUE) +RETURNS TEXT AS $$ +DECLARE + parent_relid REGCLASS; + part_name TEXT; + part_type INTEGER; + v_relkind CHAR; + v_rows BIGINT; + +BEGIN + parent_relid := @extschema@.get_parent_of_partition(partition_relid); + + PERFORM @extschema@.validate_relname(parent_relid); + PERFORM @extschema@.validate_relname(partition_relid); + + part_name := partition_relid::TEXT; /* save the name to be returned */ + part_type := @extschema@.get_partition_type(parent_relid); + + /* Check if this is a RANGE partition */ + IF part_type != 2 THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + IF NOT delete_data THEN + EXECUTE format('INSERT INTO %s SELECT * FROM %s', + parent_relid::TEXT, + partition_relid::TEXT); + GET DIAGNOSTICS v_rows = ROW_COUNT; + + /* Show number of copied rows */ + RAISE NOTICE '% rows copied from %', v_rows, partition_relid::TEXT; + END IF; + + SELECT relkind FROM pg_catalog.pg_class + WHERE oid = partition_relid + INTO v_relkind; + + /* + * Determine the kind of child relation. It can be either regular + * table (r) or foreign table (f). Depending on relkind we use + * DROP TABLE or DROP FOREIGN TABLE. 
+ */ + IF v_relkind = 'f' THEN + EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + ELSE + EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + END IF; + + RETURN part_name; +END +$$ LANGUAGE plpgsql +SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is OFF */ + +CREATE FUNCTION @extschema@.pathman_version() +RETURNS CSTRING AS 'pg_pathman', 'pathman_version' +LANGUAGE C STRICT; + +/* + * Get number of partitions managed by pg_pathman. + */ +CREATE OR REPLACE FUNCTION @extschema@.get_number_of_partitions( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT count(*)::INT4 + FROM pg_catalog.pg_inherits + WHERE inhparent = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Get partitioning key. + */ +DROP FUNCTION @extschema@.get_partition_key(REGCLASS); +CREATE FUNCTION @extschema@.get_partition_key( + parent_relid REGCLASS) +RETURNS TEXT AS +$$ + SELECT expr + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Get partitioning key type. + */ +DROP FUNCTION @extschema@.get_partition_key_type(REGCLASS); +CREATE FUNCTION @extschema@.get_partition_key_type( + parent_relid REGCLASS) +RETURNS REGTYPE AS 'pg_pathman', 'get_partition_key_type_pl' +LANGUAGE C STRICT; + +/* + * Get partitioning type. + */ +DROP FUNCTION @extschema@.get_partition_type(REGCLASS); +CREATE OR REPLACE FUNCTION @extschema@.get_partition_type( + parent_relid REGCLASS) +RETURNS INT4 AS +$$ + SELECT parttype + FROM @extschema@.pathman_config + WHERE partrel = parent_relid; +$$ +LANGUAGE sql STRICT; + +/* + * Merge RANGE partitions. + */ +DROP FUNCTION @extschema@.merge_range_partitions(regclass[]); +DROP FUNCTION @extschema@.merge_range_partitions(regclass, regclass); + +CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( + variadic partitions REGCLASS[]) +RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' +LANGUAGE C STRICT; + +/* + * Prepend new partition. 
+ */ +CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( + parent_relid REGCLASS, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS TEXT AS $$ +DECLARE + part_expr_type REGTYPE; + part_name TEXT; + part_interval TEXT; + +BEGIN + PERFORM @extschema@.validate_relname(parent_relid); + + /* Acquire lock on parent's scheme */ + PERFORM @extschema@.prevent_part_modification(parent_relid); + + part_expr_type := @extschema@.get_partition_key_type(parent_relid); + + IF NOT @extschema@.is_date_type(part_expr_type) AND + NOT @extschema@.is_operator_supported(part_expr_type, '-') THEN + RAISE EXCEPTION 'type % does not support ''-'' operator', part_expr_type::REGTYPE; + END IF; + + SELECT range_interval + FROM @extschema@.pathman_config + WHERE partrel = parent_relid + INTO part_interval; + + EXECUTE + format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + @extschema@.get_base_type(part_expr_type)::TEXT) + USING + parent_relid, + part_expr_type, + part_interval, + partition_name, + tablespace + INTO + part_name; + + RETURN part_name; +END +$$ LANGUAGE plpgsql; + +/* + * Show all existing concurrent partitioning tasks. + */ +DROP VIEW @extschema@.pathman_concurrent_part_tasks; +DROP FUNCTION @extschema@.show_concurrent_part_tasks(); +CREATE FUNCTION @extschema@.show_concurrent_part_tasks() +RETURNS TABLE ( + userid REGROLE, + pid INT, + dbid OID, + relid REGCLASS, + processed INT8, + status TEXT) +AS 'pg_pathman', 'show_concurrent_part_tasks_internal' +LANGUAGE C STRICT; + +CREATE VIEW @extschema@.pathman_concurrent_part_tasks +AS SELECT * FROM @extschema@.show_concurrent_part_tasks(); +GRANT SELECT ON @extschema@.pathman_concurrent_part_tasks TO PUBLIC; + +/* + * Split RANGE partition in two using a pivot. 
+ */ +DROP FUNCTION @extschema@.split_range_partition(regclass, anyelement, text, text, OUT anyarray); +CREATE OR REPLACE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' +LANGUAGE C; + +DROP FUNCTION @extschema@.build_update_trigger_func_name(regclass); +DROP FUNCTION @extschema@.build_update_trigger_name(regclass); +DROP FUNCTION @extschema@.create_single_update_trigger(regclass, regclass); +DROP FUNCTION @extschema@.create_update_triggers(regclass); +DROP FUNCTION @extschema@.drop_triggers(regclass); +DROP FUNCTION @extschema@.has_update_trigger(regclass); +DROP FUNCTION @extschema@.pathman_update_trigger_func() CASCADE; +DROP FUNCTION @extschema@.get_pathman_lib_version(); diff --git a/pg_pathman.control b/pg_pathman.control index 0d6af5d3..138b26c6 100644 --- a/pg_pathman.control +++ b/pg_pathman.control @@ -1,4 +1,4 @@ # pg_pathman extension comment = 'Partitioning tool for PostgreSQL' -default_version = '1.4' +default_version = '1.5' module_pathname = '$libdir/pg_pathman' diff --git a/range.sql b/range.sql index 44c6697f..5af17014 100644 --- a/range.sql +++ b/range.sql @@ -3,7 +3,7 @@ * range.sql * RANGE partitioning functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -11,7 +11,7 @@ /* * Check RANGE partition boundaries. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.check_boundaries( +CREATE FUNCTION @extschema@.check_boundaries( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -24,7 +24,7 @@ DECLARE BEGIN /* Get min and max values */ - EXECUTE format('SELECT count(*), min(%1$s), max(%1$s) + EXECUTE pg_catalog.format('SELECT count(*), min(%1$s), max(%1$s) FROM %2$s WHERE NOT %1$s IS NULL', expression, parent_relid::TEXT) INTO rows_count, min_value, max_value; @@ -49,7 +49,7 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on datetime attribute */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -59,7 +59,6 @@ CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( RETURNS INTEGER AS $$ DECLARE rows_count BIGINT; - value_type REGTYPE; max_value start_value%TYPE; cur_value start_value%TYPE := start_value; end_value start_value%TYPE; @@ -77,7 +76,7 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + EXECUTE pg_catalog.format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, max_value; IF rows_count = 0 THEN @@ -92,14 +91,12 @@ BEGIN END LOOP; END IF; - value_type := @extschema@.get_base_type(pg_typeof(start_value)); - /* * In case when user doesn't want to automatically create partitions * and specifies partition count as 0 then do not check boundaries */ IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ + /* Compute right bound of partitioning through additions */ end_value := start_value; FOR i IN 1..p_count LOOP @@ -107,14 +104,10 @@ BEGIN END LOOP; /* Check boundaries */ - EXECUTE - format('SELECT @extschema@.check_boundaries(''%s'', $1, ''%s'', ''%s''::%s)', - parent_relid, - start_value, - end_value, - 
value_type::TEXT) - USING - expression; + PERFORM @extschema@.check_boundaries(parent_relid, + expression, + start_value, + end_value); END IF; /* Create sequence for child partitions names */ @@ -149,7 +142,7 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on numerical expression */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -176,7 +169,7 @@ BEGIN /* Try to determine partitions count if not set */ IF p_count IS NULL THEN - EXECUTE format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) + EXECUTE pg_catalog.format('SELECT count(*), max(%s) FROM %s', expression, parent_relid) INTO rows_count, max_value; IF rows_count = 0 THEN @@ -200,14 +193,14 @@ BEGIN * and specifies partition count as 0 then do not check boundaries */ IF p_count != 0 THEN - /* compute right bound of partitioning through additions */ + /* Compute right bound of partitioning through additions */ end_value := start_value; FOR i IN 1..p_count LOOP end_value := end_value + p_interval; END LOOP; - /* check boundaries */ + /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, expression, start_value, @@ -246,7 +239,7 @@ $$ LANGUAGE plpgsql; /* * Creates RANGE partitions for specified relation based on bounds array */ -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions( +CREATE FUNCTION @extschema@.create_range_partitions( parent_relid REGCLASS, expression TEXT, bounds ANYARRAY, @@ -273,8 +266,8 @@ BEGIN /* Check boundaries */ PERFORM @extschema@.check_boundaries(parent_relid, expression, - bounds[0], - bounds[array_length(bounds, 1) - 1]); + bounds[1], + bounds[array_length(bounds, 1)]); /* Create sequence for child partitions names */ PERFORM @extschema@.create_naming_sequence(parent_relid); @@ -301,113 +294,10 @@ END $$ LANGUAGE plpgsql; - -/* - * Split RANGE partition - */ -CREATE 
OR REPLACE FUNCTION @extschema@.split_range_partition( - partition_relid REGCLASS, - split_value ANYELEMENT, - partition_name TEXT DEFAULT NULL, - tablespace TEXT DEFAULT NULL, - OUT p_range ANYARRAY) -RETURNS ANYARRAY AS $$ -DECLARE - parent_relid REGCLASS; - part_type INTEGER; - part_expr TEXT; - part_expr_type REGTYPE; - check_name TEXT; - check_cond TEXT; - new_partition TEXT; - -BEGIN - parent_relid = @extschema@.get_parent_of_partition(partition_relid); - - PERFORM @extschema@.validate_relname(parent_relid); - PERFORM @extschema@.validate_relname(partition_relid); - - /* Acquire lock on parent */ - PERFORM @extschema@.prevent_part_modification(parent_relid); - - /* Acquire data modification lock (prevent further modifications) */ - PERFORM @extschema@.prevent_data_modification(partition_relid); - - part_expr_type = @extschema@.get_partition_key_type(parent_relid); - part_expr := @extschema@.get_partition_key(parent_relid); - - part_type := @extschema@.get_partition_type(parent_relid); - - /* Check if this is a RANGE partition */ - IF part_type != 2 THEN - RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; - END IF; - - /* Get partition values range */ - EXECUTE format('SELECT @extschema@.get_part_range($1, NULL::%s)', - @extschema@.get_base_type(part_expr_type)::TEXT) - USING partition_relid - INTO p_range; - - IF p_range IS NULL THEN - RAISE EXCEPTION 'could not find specified partition'; - END IF; - - /* Check if value fit into the range */ - IF p_range[1] > split_value OR p_range[2] <= split_value - THEN - RAISE EXCEPTION 'specified value does not fit into the range [%, %)', - p_range[1], p_range[2]; - END IF; - - /* Create new partition */ - new_partition := @extschema@.create_single_range_partition(parent_relid, - split_value, - p_range[2], - partition_name, - tablespace); - - /* Copy data */ - check_cond := @extschema@.build_range_condition(new_partition::regclass, - part_expr, split_value, p_range[2]); - EXECUTE format('WITH 
part_data AS (DELETE FROM %s WHERE %s RETURNING *) - INSERT INTO %s SELECT * FROM part_data', - partition_relid::TEXT, - check_cond, - new_partition); - - /* Alter original partition */ - check_cond := @extschema@.build_range_condition(partition_relid::regclass, - part_expr, p_range[1], split_value); - check_name := @extschema@.build_check_constraint_name(partition_relid); - - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', - partition_relid::TEXT, - check_name); - - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', - partition_relid::TEXT, - check_name, - check_cond); -END -$$ LANGUAGE plpgsql; - -/* - * The special case of merging two partitions - */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partition1 REGCLASS, - partition2 REGCLASS) -RETURNS VOID AS $$ -BEGIN - PERFORM @extschema@.merge_range_partitions(array[partition1, partition2]::regclass[]); -END -$$ LANGUAGE plpgsql; - /* * Append new partition. */ -CREATE OR REPLACE FUNCTION @extschema@.append_range_partition( +CREATE FUNCTION @extschema@.append_range_partition( parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) @@ -420,7 +310,7 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); part_expr_type := @extschema@.get_partition_key_type(parent_relid); @@ -436,7 +326,7 @@ BEGIN INTO part_interval; EXECUTE - format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + pg_catalog.format('SELECT @extschema@.append_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, @@ -457,7 +347,7 @@ $$ LANGUAGE plpgsql; * * NOTE: we don't take a xact_handling lock here. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.append_partition_internal( +CREATE FUNCTION @extschema@.append_partition_internal( parent_relid REGCLASS, p_atttype REGTYPE, p_interval TEXT, @@ -478,7 +368,7 @@ BEGIN part_expr_type := @extschema@.get_base_type(p_atttype); /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', + EXECUTE pg_catalog.format('SELECT @extschema@.get_part_range($1, -1, NULL::%s)', part_expr_type::TEXT) USING parent_relid INTO p_range; @@ -488,13 +378,13 @@ BEGIN END IF; IF @extschema@.is_date_type(p_atttype) THEN - v_args_format := format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, $2, ($2 + $3::interval)::%s, $4, $5', part_expr_type::TEXT); ELSE - v_args_format := format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, $2, $2 + $3::%s, $4, $5', part_expr_type::TEXT); END IF; EXECUTE - format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + pg_catalog.format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) USING parent_relid, p_range[2], @@ -511,7 +401,7 @@ $$ LANGUAGE plpgsql; /* * Prepend new partition. 
*/ -CREATE OR REPLACE FUNCTION @extschema@.prepend_range_partition( +CREATE FUNCTION @extschema@.prepend_range_partition( parent_relid REGCLASS, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) @@ -524,7 +414,7 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); part_expr_type := @extschema@.get_partition_key_type(parent_relid); @@ -540,7 +430,7 @@ BEGIN INTO part_interval; EXECUTE - format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', + pg_catalog.format('SELECT @extschema@.prepend_partition_internal($1, $2, $3, ARRAY[]::%s[], $4, $5)', @extschema@.get_base_type(part_expr_type)::TEXT) USING parent_relid, @@ -561,7 +451,7 @@ $$ LANGUAGE plpgsql; * * NOTE: we don't take a xact_handling lock here. */ -CREATE OR REPLACE FUNCTION @extschema@.prepend_partition_internal( +CREATE FUNCTION @extschema@.prepend_partition_internal( parent_relid REGCLASS, p_atttype REGTYPE, p_interval TEXT, @@ -582,7 +472,7 @@ BEGIN part_expr_type := @extschema@.get_base_type(p_atttype); /* We have to pass fake NULL casted to column's type */ - EXECUTE format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', + EXECUTE pg_catalog.format('SELECT @extschema@.get_part_range($1, 0, NULL::%s)', part_expr_type::TEXT) USING parent_relid INTO p_range; @@ -592,13 +482,13 @@ BEGIN END IF; IF @extschema@.is_date_type(p_atttype) THEN - v_args_format := format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, ($2 - $3::interval)::%s, $2, $4, $5', part_expr_type::TEXT); ELSE - v_args_format := format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); + v_args_format := pg_catalog.format('$1, $2 - $3::%s, $2, $4, $5', part_expr_type::TEXT); END IF; EXECUTE - format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) + 
pg_catalog.format('SELECT @extschema@.create_single_range_partition(%s)', v_args_format) USING parent_relid, p_range[1], @@ -615,7 +505,7 @@ $$ LANGUAGE plpgsql; /* * Add new partition */ -CREATE OR REPLACE FUNCTION @extschema@.add_range_partition( +CREATE FUNCTION @extschema@.add_range_partition( parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, @@ -628,14 +518,14 @@ DECLARE BEGIN PERFORM @extschema@.validate_relname(parent_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); IF start_value >= end_value THEN RAISE EXCEPTION 'failed to create partition: start_value is greater than end_value'; END IF; - /* check range overlap */ + /* Check range overlap */ IF @extschema@.get_number_of_partitions(parent_relid) > 0 THEN PERFORM @extschema@.check_range_available(parent_relid, start_value, @@ -657,7 +547,7 @@ $$ LANGUAGE plpgsql; /* * Drop range partition */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition( +CREATE FUNCTION @extschema@.drop_range_partition( partition_relid REGCLASS, delete_data BOOLEAN DEFAULT TRUE) RETURNS TEXT AS $$ @@ -682,11 +572,11 @@ BEGIN RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; END IF; - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); IF NOT delete_data THEN - EXECUTE format('INSERT INTO %s SELECT * FROM %s', + EXECUTE pg_catalog.format('INSERT INTO %s SELECT * FROM %s', parent_relid::TEXT, partition_relid::TEXT); GET DIAGNOSTICS v_rows = ROW_COUNT; @@ -705,9 +595,9 @@ BEGIN * DROP TABLE or DROP FOREIGN TABLE. 
*/ IF v_relkind = 'f' THEN - EXECUTE format('DROP FOREIGN TABLE %s', partition_relid::TEXT); + EXECUTE pg_catalog.format('DROP FOREIGN TABLE %s', partition_relid::TEXT); ELSE - EXECUTE format('DROP TABLE %s', partition_relid::TEXT); + EXECUTE pg_catalog.format('DROP TABLE %s', partition_relid::TEXT); END IF; RETURN part_name; @@ -718,7 +608,7 @@ SET pg_pathman.enable_partitionfilter = off; /* ensures that PartitionFilter is /* * Attach range partition */ -CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( +CREATE FUNCTION @extschema@.attach_range_partition( parent_relid REGCLASS, partition_relid REGCLASS, start_value ANYELEMENT, @@ -726,6 +616,7 @@ CREATE OR REPLACE FUNCTION @extschema@.attach_range_partition( RETURNS TEXT AS $$ DECLARE part_expr TEXT; + part_type INTEGER; rel_persistence CHAR; v_init_callback REGPROCEDURE; @@ -733,7 +624,7 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); - /* Acquire lock on parent */ + /* Acquire lock on parent's scheme */ PERFORM @extschema@.prevent_part_modification(parent_relid); /* Ignore temporary tables */ @@ -745,24 +636,32 @@ BEGIN partition_relid::TEXT; END IF; - /* check range overlap */ + /* Check range overlap */ PERFORM @extschema@.check_range_available(parent_relid, start_value, end_value); - IF NOT @extschema@.is_tuple_convertible(parent_relid, partition_relid) THEN + BEGIN + PERFORM @extschema@.is_tuple_convertible(parent_relid, partition_relid); + EXCEPTION WHEN OTHERS THEN RAISE EXCEPTION 'partition must have a compatible tuple format'; - END IF; - - /* Set inheritance */ - EXECUTE format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + END; part_expr := @extschema@.get_partition_key(parent_relid); + part_type := @extschema@.get_partition_type(parent_relid); IF part_expr IS NULL THEN RAISE EXCEPTION 'table "%" is not partitioned', parent_relid::TEXT; END IF; + /* Check if this is a RANGE partition */ + IF part_type != 2 
THEN + RAISE EXCEPTION '"%" is not a RANGE partition', partition_relid::TEXT; + END IF; + + /* Set inheritance */ + EXECUTE pg_catalog.format('ALTER TABLE %s INHERIT %s', partition_relid, parent_relid); + /* Set check constraint */ - EXECUTE format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', + EXECUTE pg_catalog.format('ALTER TABLE %s ADD CONSTRAINT %s CHECK (%s)', partition_relid::TEXT, @extschema@.build_check_constraint_name(partition_relid), @extschema@.build_range_condition(partition_relid, @@ -778,11 +677,6 @@ BEGIN ON params.partrel = parent_relid INTO v_init_callback; - /* If update trigger is enabled then create one for this partition */ - if @extschema@.has_update_trigger(parent_relid) THEN - PERFORM @extschema@.create_single_update_trigger(parent_relid, partition_relid); - END IF; - /* Invoke an initialization callback */ PERFORM @extschema@.invoke_on_partition_created_callback(parent_relid, partition_relid, @@ -797,7 +691,7 @@ $$ LANGUAGE plpgsql; /* * Detach range partition */ -CREATE OR REPLACE FUNCTION @extschema@.detach_range_partition( +CREATE FUNCTION @extschema@.detach_range_partition( partition_relid REGCLASS) RETURNS TEXT AS $$ DECLARE @@ -810,6 +704,9 @@ BEGIN PERFORM @extschema@.validate_relname(parent_relid); PERFORM @extschema@.validate_relname(partition_relid); + /* Acquire lock on partition's scheme */ + PERFORM @extschema@.prevent_part_modification(partition_relid); + /* Acquire lock on parent */ PERFORM @extschema@.prevent_data_modification(parent_relid); @@ -821,32 +718,75 @@ BEGIN END IF; /* Remove inheritance */ - EXECUTE format('ALTER TABLE %s NO INHERIT %s', + EXECUTE pg_catalog.format('ALTER TABLE %s NO INHERIT %s', partition_relid::TEXT, parent_relid::TEXT); /* Remove check constraint */ - EXECUTE format('ALTER TABLE %s DROP CONSTRAINT %s', + EXECUTE pg_catalog.format('ALTER TABLE %s DROP CONSTRAINT %s', partition_relid::TEXT, @extschema@.build_check_constraint_name(partition_relid)); - /* Remove update trigger */ - EXECUTE 
format('DROP TRIGGER IF EXISTS %s ON %s', - @extschema@.build_update_trigger_name(parent_relid), - partition_relid::TEXT); - RETURN partition_relid; END $$ LANGUAGE plpgsql; /* - * Merge multiple partitions. All data will be copied to the first one. - * The rest of partitions will be dropped. + * Create a naming sequence for partitioned table. */ -CREATE OR REPLACE FUNCTION @extschema@.merge_range_partitions( - partitions REGCLASS[]) -RETURNS VOID AS 'pg_pathman', 'merge_range_partitions' +CREATE FUNCTION @extschema@.create_naming_sequence( + parent_relid REGCLASS) +RETURNS TEXT AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE pg_catalog.format('DROP SEQUENCE IF EXISTS %s', seq_name); + EXECUTE pg_catalog.format('CREATE SEQUENCE %s START 1', seq_name); + + RETURN seq_name; +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + +/* + * Drop a naming sequence for partitioned table. + */ +CREATE FUNCTION @extschema@.drop_naming_sequence( + parent_relid REGCLASS) +RETURNS VOID AS $$ +DECLARE + seq_name TEXT; + +BEGIN + seq_name := @extschema@.build_sequence_name(parent_relid); + + EXECUTE pg_catalog.format('DROP SEQUENCE IF EXISTS %s', seq_name); +END +$$ LANGUAGE plpgsql +SET client_min_messages = WARNING; /* mute NOTICE message */ + + +/* + * Split RANGE partition in two using a pivot. + */ +CREATE FUNCTION @extschema@.split_range_partition( + partition_relid REGCLASS, + split_value ANYELEMENT, + partition_name TEXT DEFAULT NULL, + tablespace TEXT DEFAULT NULL) +RETURNS REGCLASS AS 'pg_pathman', 'split_range_partition' +LANGUAGE C; + +/* + * Merge RANGE partitions. + */ +CREATE FUNCTION @extschema@.merge_range_partitions( + variadic partitions REGCLASS[]) +RETURNS REGCLASS AS 'pg_pathman', 'merge_range_partitions' LANGUAGE C STRICT; /* @@ -856,13 +796,12 @@ LANGUAGE C STRICT; * DROP PARTITION. 
In Oracle partitions only have upper bound and when * partition is dropped the next one automatically covers freed range */ -CREATE OR REPLACE FUNCTION @extschema@.drop_range_partition_expand_next( +CREATE FUNCTION @extschema@.drop_range_partition_expand_next( partition_relid REGCLASS) RETURNS VOID AS 'pg_pathman', 'drop_range_partition_expand_next' LANGUAGE C STRICT; - -CREATE OR REPLACE FUNCTION @extschema@.create_range_partitions_internal( +CREATE FUNCTION @extschema@.create_range_partitions_internal( parent_relid REGCLASS, bounds ANYARRAY, partition_names TEXT[], @@ -874,20 +813,19 @@ LANGUAGE C; * Creates new RANGE partition. Returns partition name. * NOTE: This function SHOULD NOT take xact_handling lock (BGWs in 9.5). */ -CREATE OR REPLACE FUNCTION @extschema@.create_single_range_partition( +CREATE FUNCTION @extschema@.create_single_range_partition( parent_relid REGCLASS, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT DEFAULT NULL, tablespace TEXT DEFAULT NULL) RETURNS REGCLASS AS 'pg_pathman', 'create_single_range_partition_pl' -LANGUAGE C -SET client_min_messages = WARNING; +LANGUAGE C; /* * Construct CHECK constraint condition for a range partition. */ -CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( +CREATE FUNCTION @extschema@.build_range_condition( partition_relid REGCLASS, expression TEXT, start_value ANYELEMENT, @@ -895,16 +833,18 @@ CREATE OR REPLACE FUNCTION @extschema@.build_range_condition( RETURNS TEXT AS 'pg_pathman', 'build_range_condition' LANGUAGE C; -CREATE OR REPLACE FUNCTION @extschema@.build_sequence_name( +/* + * Generate a name for naming sequence. + */ +CREATE FUNCTION @extschema@.build_sequence_name( parent_relid REGCLASS) RETURNS TEXT AS 'pg_pathman', 'build_sequence_name' LANGUAGE C STRICT; - /* * Returns N-th range (as an array of two elements). 
*/ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( +CREATE FUNCTION @extschema@.get_part_range( parent_relid REGCLASS, partition_idx INTEGER, dummy ANYELEMENT) @@ -914,7 +854,7 @@ LANGUAGE C; /* * Returns min and max values for specified RANGE partition. */ -CREATE OR REPLACE FUNCTION @extschema@.get_part_range( +CREATE FUNCTION @extschema@.get_part_range( partition_relid REGCLASS, dummy ANYELEMENT) RETURNS ANYARRAY AS 'pg_pathman', 'get_part_range_by_oid' @@ -924,7 +864,7 @@ LANGUAGE C; * Checks if range overlaps with existing partitions. * Returns TRUE if overlaps and FALSE otherwise. */ -CREATE OR REPLACE FUNCTION @extschema@.check_range_available( +CREATE FUNCTION @extschema@.check_range_available( parent_relid REGCLASS, range_min ANYELEMENT, range_max ANYELEMENT) @@ -934,14 +874,14 @@ LANGUAGE C; /* * Generate range bounds starting with 'p_start' using 'p_interval'. */ -CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( +CREATE FUNCTION @extschema@.generate_range_bounds( p_start ANYELEMENT, p_interval INTERVAL, p_count INTEGER) RETURNS ANYARRAY AS 'pg_pathman', 'generate_range_bounds_pl' LANGUAGE C STRICT; -CREATE OR REPLACE FUNCTION @extschema@.generate_range_bounds( +CREATE FUNCTION @extschema@.generate_range_bounds( p_start ANYELEMENT, p_interval ANYELEMENT, p_count INTEGER) diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 00000000..2e2edc6f --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,194 @@ +#!/usr/bin/env bash + +# +# Copyright (c) 2018, Postgres Professional +# +# supported levels: +# * standard +# * scan-build +# * hardcore +# * nightmare +# + +set -ux +status=0 + +# global exports +export PGPORT=55435 +export VIRTUAL_ENV_DISABLE_PROMPT=1 + +PATHMAN_DIR=$PWD + +# indicator of using cassert + valgrind support +USE_ASSERT_VALGRIND=false +if [ "$LEVEL" = "hardcore" ] || \ + [ "$LEVEL" = "nightmare" ]; then + USE_ASSERT_VALGRIND=true +fi + +# indicator of using special patch for vanilla +if [ "$(printf '%s\n' "14" 
"$PG_VERSION" | sort -V | head -n1)" = "$PG_VERSION" ]; then + USE_PATH=false +else + #patch version 14 and newer + USE_PATH=true +fi + +# rebuild PostgreSQL with cassert + valgrind support +if [ "$USE_ASSERT_VALGRIND" = true ] || \ + [ "$USE_PATH" = true ]; then + + set -e + + CUSTOM_PG_BIN=$PWD/pg_bin + CUSTOM_PG_SRC=$PWD/postgresql + + # here PG_VERSION is provided by postgres:X-alpine docker image + curl "https://ftp.postgresql.org/pub/source/v$PG_VERSION/postgresql-$PG_VERSION.tar.bz2" -o postgresql.tar.bz2 + echo "$PG_SHA256 *postgresql.tar.bz2" | sha256sum -c - + + mkdir $CUSTOM_PG_SRC + + tar \ + --extract \ + --file postgresql.tar.bz2 \ + --directory $CUSTOM_PG_SRC \ + --strip-components 1 + + cd $CUSTOM_PG_SRC + + if [ "$USE_PATH" = true ]; then + # apply the patch + patch -p1 < $PATHMAN_DIR/patches/REL_${PG_VERSION%.*}_STABLE-pg_pathman-core.diff + fi + + if [ "$USE_ASSERT_VALGRIND" = true ]; then + # enable Valgrind support + sed -i.bak "s/\/* #define USE_VALGRIND *\//#define USE_VALGRIND/g" src/include/pg_config_manual.h + + # enable additional options + ./configure \ + CFLAGS='-Og -ggdb3 -fno-omit-frame-pointer' \ + --enable-cassert \ + --prefix=$CUSTOM_PG_BIN \ + --quiet + else + # without additional options + ./configure \ + --enable-cassert \ + --prefix=$CUSTOM_PG_BIN \ + --quiet + fi + + # build & install PG + time make -s -j$(nproc) && make -s install + + # build & install FDW + time make -s -C contrib/postgres_fdw -j$(nproc) && \ + make -s -C contrib/postgres_fdw install + + # override default PostgreSQL instance + export PATH=$CUSTOM_PG_BIN/bin:$PATH + export LD_LIBRARY_PATH=$CUSTOM_PG_BIN/lib + + # show pg_config path (just in case) + which pg_config + + cd - + + set +e +fi + +# show pg_config just in case +pg_config + +# perform code checks if asked to +if [ "$LEVEL" = "scan-build" ] || \ + [ "$LEVEL" = "hardcore" ] || \ + [ "$LEVEL" = "nightmare" ]; then + + # perform static analyzis + scan-build --status-bugs make 
USE_PGXS=1 || status=$? + + # something's wrong, exit now! + if [ $status -ne 0 ]; then exit 1; fi + + # don't forget to "make clean" + make USE_PGXS=1 clean +fi + + +# build and install extension (using PG_CPPFLAGS and SHLIB_LINK for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" +make USE_PGXS=1 install + +# initialize database +initdb -D $PGDATA + +# change PG's config +echo "port = $PGPORT" >> $PGDATA/postgresql.conf +cat conf.add >> $PGDATA/postgresql.conf + +# restart cluster 'test' +if [ "$LEVEL" = "nightmare" ]; then + ls $CUSTOM_PG_BIN/bin + + valgrind \ + --tool=memcheck \ + --leak-check=no \ + --time-stamp=yes \ + --track-origins=yes \ + --trace-children=yes \ + --gen-suppressions=all \ + --suppressions=$CUSTOM_PG_SRC/src/tools/valgrind.supp \ + --log-file=/tmp/valgrind-%p.log \ + pg_ctl start -l /tmp/postgres.log -w || status=$? +else + pg_ctl start -l /tmp/postgres.log -w || status=$? +fi + +# something's wrong, exit now! +if [ $status -ne 0 ]; then cat /tmp/postgres.log; exit 1; fi + +# run regression tests +export PG_REGRESS_DIFF_OPTS="-w -U3" # for alpine's diff (BusyBox) +make USE_PGXS=1 installcheck || status=$? + +# show diff if it exists +if [ -f regression.diffs ]; then cat regression.diffs; fi + +# run python tests +set +x +virtualenv /tmp/env && source /tmp/env/bin/activate && pip install testgres +make USE_PGXS=1 python_tests || status=$? +deactivate +set -x + +if [ $status -ne 0 ]; then tail -n 2000 tests/python/tests.log; fi + +# show Valgrind logs if necessary +if [ "$LEVEL" = "nightmare" ]; then + for f in $(find /tmp -name valgrind-*.log); do + if grep -q 'Command: [^ ]*/postgres' $f && grep -q 'ERROR SUMMARY: [1-9]' $f; then + echo "========= Contents of $f" + cat $f + status=1 + fi + done +fi + +# run cmocka tests (using CFLAGS_SL for gcov) +make USE_PGXS=1 PG_CPPFLAGS="-coverage" cmocka_tests || status=$? + +# something's wrong, exit now! 
+if [ $status -ne 0 ]; then exit 1; fi + +# generate *.gcov files +gcov *.c *.h + + +set +ux + + +# send coverage stats to Codecov +bash <(curl -s https://codecov.io/bash) diff --git a/specs/for_update.spec b/specs/for_update.spec index f7a8f758..c18cd4f8 100644 --- a/specs/for_update.spec +++ b/specs/for_update.spec @@ -19,8 +19,6 @@ step "s1_r" { rollback; } step "s1_update" { update test_tbl set id = 2 where id = 1; } session "s2" -step "s2_b" { begin; } -step "s2_c" { commit; } step "s2_select_locked" { select * from test_tbl where id = 1 for share; } step "s2_select" { select * from test_tbl where id = 1; } diff --git a/specs/insert_nodes.spec b/specs/insert_nodes.spec index 3bb67746..a5d0c7f9 100644 --- a/specs/insert_nodes.spec +++ b/specs/insert_nodes.spec @@ -17,20 +17,19 @@ session "s1" step "s1b" { BEGIN; } step "s1_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } step "s1_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } -step "s1_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step "s1_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; } step "s1r" { ROLLBACK; } -step "s1c" { COMMIT; } session "s2" step "s2b" { BEGIN; } step "s2_insert_150" { INSERT INTO range_rel SELECT generate_series(1, 150); } step "s2_insert_300" { INSERT INTO range_rel SELECT generate_series(151, 300); } -step "s2_show_partitions" { SELECT c.consrc FROM pg_inherits i LEFT JOIN pg_constraint c +step "s2_show_partitions" { SELECT pg_get_constraintdef(c.oid) FROM pg_inherits i LEFT JOIN pg_constraint c ON c.conrelid = i.inhrelid - WHERE i.inhparent = 'range_rel'::regclass + WHERE i.inhparent = 'range_rel'::regclass AND c.contype = 'c' ORDER BY c.oid; } step "s2r" { ROLLBACK; } step "s2c" { 
COMMIT; } diff --git a/specs/rollback_on_create_partitions.spec b/specs/rollback_on_create_partitions.spec index a24c2897..806e6072 100644 --- a/specs/rollback_on_create_partitions.spec +++ b/specs/rollback_on_create_partitions.spec @@ -22,7 +22,7 @@ step "rollback_a" { ROLLBACK TO SAVEPOINT a; } step "savepoint_b" { SAVEPOINT b; } step "rollback_b" { ROLLBACK TO SAVEPOINT b; } step "savepoint_c" { SAVEPOINT c; } -step "show_rel" { EXPLAIN (COSTS OFF) SELECT * FROM range_rel; } +step "show_rel" { SELECT l.parent, l.partition FROM pathman_partition_list l WHERE l.parent = 'range_rel'::regclass; } permutation "begin" "insert_data" "create_partitions" "show_rel" "rollback" "show_rel" diff --git a/sql/pathman_CVE-2020-14350.sql b/sql/pathman_CVE-2020-14350.sql new file mode 100644 index 00000000..07daa617 --- /dev/null +++ b/sql/pathman_CVE-2020-14350.sql @@ -0,0 +1,78 @@ +/* + * Check fix for CVE-2020-14350. + * See also 7eeb1d986 postgresql commit. + */ + +SET client_min_messages = 'warning'; +DROP FUNCTION IF EXISTS _partition_data_concurrent(oid,integer); +DROP FUNCTION IF EXISTS create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE IF EXISTS test1 CASCADE; +DROP TABLE IF EXISTS test2 CASCADE; +DROP ROLE IF EXISTS pathman_regress_hacker; +SET client_min_messages = 'notice'; +GRANT CREATE ON SCHEMA public TO PUBLIC; + +CREATE EXTENSION pg_pathman; +CREATE ROLE pathman_regress_hacker LOGIN; + +-- Test 1 +RESET ROLE; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; + +SET ROLE pathman_regress_hacker; +SHOW is_superuser; +CREATE FUNCTION _partition_data_concurrent(relation oid, p_limit INT, OUT p_total BIGINT) +RETURNS bigint +AS $$ +BEGIN + ALTER ROLE pathman_regress_hacker SUPERUSER; + SELECT _partition_data_concurrent(relation, NULL::text, NULL::text, p_limit) INTO p_total; +END +$$ LANGUAGE plpgsql; + +CREATE TABLE test1(i INT4 NOT NULL); +INSERT INTO test1 SELECT generate_series(1, 500); +SELECT create_hash_partitions('test1', 'i', 5, false); 
+ +RESET ROLE; +SELECT partition_table_concurrently('test1', 10, 1); +SELECT pg_sleep(1); + +-- Test result (must be 'off') +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + +-- Test 2 +RESET ROLE; +ALTER ROLE pathman_regress_hacker NOSUPERUSER; + +SET ROLE pathman_regress_hacker; +SHOW is_superuser; +CREATE FUNCTION create_single_range_partition(parent_relid TEXT, start_value ANYELEMENT, end_value ANYELEMENT, partition_name TEXT) +RETURNS REGCLASS +AS $$ +BEGIN + ALTER ROLE pathman_regress_hacker SUPERUSER; + RETURN create_single_range_partition(parent_relid, start_value, end_value, partition_name, NULL::text); +END +$$ LANGUAGE plpgsql; + +RESET ROLE; +CREATE TABLE test2(i INT4 NOT NULL); +INSERT INTO test2 VALUES(0); +SELECT create_range_partitions('test2', 'i', 0, 1); +INSERT INTO test2 values(1); + +-- Test result (must be 'off') +SET ROLE pathman_regress_hacker; +SHOW is_superuser; + +-- Cleanup +RESET ROLE; +DROP FUNCTION _partition_data_concurrent(oid,integer); +DROP FUNCTION create_single_range_partition(TEXT,ANYELEMENT,ANYELEMENT,TEXT); +DROP TABLE test1 CASCADE; +DROP TABLE test2 CASCADE; +DROP ROLE pathman_regress_hacker; +DROP EXTENSION pg_pathman; + diff --git a/sql/pathman_array_qual.sql b/sql/pathman_array_qual.sql index 7ab15b6a..9f1b0c1e 100644 --- a/sql/pathman_array_qual.sql +++ b/sql/pathman_array_qual.sql @@ -1,3 +1,8 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. + */ + \set VERBOSITY terse SET search_path = 'public'; @@ -422,5 +427,6 @@ DEALLOCATE q; -DROP SCHEMA array_qual CASCADE; +DROP TABLE array_qual.test CASCADE; +DROP SCHEMA array_qual; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_basic.sql b/sql/pathman_basic.sql index 808292ed..478935c5 100644 --- a/sql/pathman_basic.sql +++ b/sql/pathman_basic.sql @@ -1,3 +1,12 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. 
Also, EXPLAIN now always shows key first in quals + * ('test commutator' queries). + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ + \set VERBOSITY terse SET search_path = 'public'; @@ -158,14 +167,18 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; +EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE (id >= 500 AND id < 1500) OR (id > 2500); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); @@ -179,8 +192,11 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE false; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = NULL; EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2; +EXPLAIN (COSTS OFF) 
SELECT * FROM test.hash_rel WHERE 2 = value; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.hash_rel WHERE value = 2 OR value = 1; +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 = id; /* test commutator */ +EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE 2500 < id; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id > 2500; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1000 AND id < 3000; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id >= 1500 AND id < 2500; @@ -189,6 +205,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel ORDER BY id; EXPLAIN (COSTS OFF) SELECT * FROM test.num_range_rel WHERE id <= 2500 ORDER BY id; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2015-02-15'; +EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE '2015-02-15' < dt; /* test commutator */ EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-01' AND dt < '2015-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt >= '2015-02-15' AND dt < '2015-03-15'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE (dt >= '2015-01-15' AND dt < '2015-02-15') OR (dt > '2015-03-15'); @@ -207,18 +224,6 @@ SET enable_seqscan = OFF; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2015-03-01' ORDER BY dt; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel_1 UNION ALL SELECT * FROM test.range_rel_2 ORDER BY dt; -/* - * Join - */ -set enable_nestloop = OFF; -SET enable_hashjoin = ON; -SET enable_mergejoin = OFF; -EXPLAIN (COSTS OFF) -SELECT * FROM test.range_rel j1 -JOIN test.range_rel j2 on j2.id = j1.id -JOIN test.num_range_rel j3 on j3.id = j1.id -WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; - /* * Test inlined SQL functions */ @@ -255,6 +260,7 @@ DROP TABLE test.hash_varchar CASCADE; /* Split first partition in half */ SELECT pathman.split_range_partition('test.num_range_rel_1', 500); EXPLAIN (COSTS OFF) 
SELECT * FROM test.num_range_rel WHERE id BETWEEN 100 AND 700; +SELECT tableoid::regclass, id FROM test.num_range_rel WHERE id IN (499, 500, 501) ORDER BY id; SELECT pathman.split_range_partition('test.range_rel_1', '2015-01-15'::DATE); @@ -382,6 +388,19 @@ DROP TABLE test.num_range_rel CASCADE; DROP TABLE test.range_rel CASCADE; +/* Test attributes copying */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL) +WITH (fillfactor = 70); +INSERT INTO test.range_rel (dt) + SELECT g FROM generate_series('2015-01-01', '2015-02-15', '1 month'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::date, '1 month'::interval); +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel'::REGCLASS; +SELECT reloptions, relpersistence FROM pg_class WHERE oid='test.range_rel_1'::REGCLASS; +DROP TABLE test.range_rel CASCADE; + /* Test automatic partition creation */ CREATE TABLE test.range_rel ( id SERIAL PRIMARY KEY, @@ -434,13 +453,6 @@ INSERT INTO test."TeSt" VALUES (1, 1); INSERT INTO test."TeSt" VALUES (2, 2); INSERT INTO test."TeSt" VALUES (3, 3); SELECT * FROM test."TeSt"; -SELECT pathman.create_update_triggers('test."TeSt"'); -UPDATE test."TeSt" SET a = 1; -SELECT * FROM test."TeSt"; -SELECT * FROM test."TeSt" WHERE a = 1; -EXPLAIN (COSTS OFF) SELECT * FROM test."TeSt" WHERE a = 1; -SELECT pathman.drop_partitions('test."TeSt"'); -SELECT * FROM test."TeSt"; DROP TABLE test."TeSt" CASCADE; CREATE TABLE test."RangeRel" ( @@ -491,22 +503,6 @@ SELECT prepend_range_partition('test.range_rel'); EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt < '2010-03-01'; EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel WHERE dt > '2010-12-15'; -/* Temporary table for JOINs */ -CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); -INSERT INTO test.tmp VALUES (1, 1), (2, 2); - -/* Test UPDATE and DELETE */ -EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; 
-UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; -SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; -DELETE FROM test.range_rel WHERE dt = '2010-06-15'; -SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; -EXPLAIN (COSTS OFF) UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -UPDATE test.range_rel r SET value = t.value FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; -EXPLAIN (COSTS OFF) DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; -DELETE FROM test.range_rel r USING test.tmp t WHERE r.dt = '2010-01-02' AND r.id = t.id; - /* Create range partitions from whole range */ SELECT drop_partitions('test.range_rel'); @@ -559,7 +555,25 @@ ORDER BY partition; DROP TABLE test.provided_part_names CASCADE; +/* test preventing of double expand of inherited tables */ +CREATE TABLE test.mixinh_parent (id INT PRIMARY KEY); +CREATE TABLE test.mixinh_child1 () INHERITS (test.mixinh_parent); +SELECT create_range_partitions('test.mixinh_child1', 'id', 1, 10, 1); +INSERT INTO test.mixinh_child1 VALUES (1); +SELECT * FROM test.mixinh_child1; +SELECT * FROM test.mixinh_parent; -DROP SCHEMA test CASCADE; +DROP TABLE test.hash_rel CASCADE; +DROP TABLE test.index_on_childs CASCADE; +DROP TABLE test.mixinh_child1 CASCADE; +DROP TABLE test.mixinh_parent CASCADE; +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.hash_rel_wrong CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP TABLE test.range_rel_archive CASCADE; +DROP TABLE test.special_case_1_ind_o_s CASCADE; +DROP TABLE test.range_rel_test1 CASCADE; +DROP TABLE test.range_rel_test2 CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_bgw.sql b/sql/pathman_bgw.sql index 90165f4c..74239e99 100644 --- a/sql/pathman_bgw.sql +++ b/sql/pathman_bgw.sql @@ 
-5,6 +5,7 @@ CREATE EXTENSION pg_pathman; CREATE SCHEMA test_bgw; + /* * Tests for SpawnPartitionsWorker */ @@ -53,6 +54,96 @@ SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 par DROP TABLE test_bgw.test_4 CASCADE; +/* test error handling in BGW */ +CREATE TABLE test_bgw.test_5(val INT4 NOT NULL); +SELECT create_range_partitions('test_bgw.test_5', 'val', 1, 10, 2); + +CREATE OR REPLACE FUNCTION test_bgw.abort_xact(args JSONB) +RETURNS VOID AS $$ +BEGIN + RAISE EXCEPTION 'aborting xact!'; +END +$$ language plpgsql; + +SELECT set_spawn_using_bgw('test_bgw.test_5', true); +SELECT set_init_callback('test_bgw.test_5', 'test_bgw.abort_xact(jsonb)'); +INSERT INTO test_bgw.test_5 VALUES (-100); +SELECT * FROM pathman_partition_list ORDER BY partition; /* should contain 3 partitions */ + +DROP FUNCTION test_bgw.abort_xact(args JSONB); +DROP TABLE test_bgw.test_5 CASCADE; + + + +/* + * Tests for ConcurrentPartWorker + */ -DROP SCHEMA test_bgw CASCADE; +CREATE TABLE test_bgw.conc_part(id INT4 NOT NULL); +INSERT INTO test_bgw.conc_part SELECT generate_series(1, 500); +SELECT create_hash_partitions('test_bgw.conc_part', 'id', 5, false); + +BEGIN; +/* Also test FOR SHARE/UPDATE conflicts in BGW */ +SELECT * FROM test_bgw.conc_part ORDER BY id LIMIT 1 FOR SHARE; +/* Run partitioning bgworker */ +SELECT partition_table_concurrently('test_bgw.conc_part', 10, 1); +/* Wait until bgworker starts */ +SELECT pg_sleep(1); +ROLLBACK; + +/* Wait until it finises */ +DO $$ +DECLARE + ops int8; + rows int8; + rows_old int8 := 0; + i int4 := 0; -- protect from endless loop +BEGIN + LOOP + -- get total number of processed rows + SELECT processed + FROM pathman_concurrent_part_tasks + WHERE relid = 'test_bgw.conc_part'::regclass + INTO rows; + + -- get number of partitioning tasks + GET DIAGNOSTICS ops = ROW_COUNT; + + IF ops > 0 THEN + PERFORM pg_sleep(0.2); + + ASSERT rows IS NOT NULL; + + IF rows_old = rows THEN + i = i + 1; + ELSIF rows < rows_old THEN + RAISE 
EXCEPTION 'rows is decreasing: new %, old %', rows, rows_old; + ELSIF rows > 500 THEN + RAISE EXCEPTION 'processed % rows', rows; + END IF; + ELSE + EXIT; -- exit loop + END IF; + + IF i > 500 THEN + RAISE WARNING 'looks like partitioning bgw is stuck!'; + EXIT; -- exit loop + END IF; + + rows_old = rows; + END LOOP; +END +$$ LANGUAGE plpgsql; + +/* Check amount of tasks and rows in parent and partitions */ +SELECT count(*) FROM pathman_concurrent_part_tasks; +SELECT count(*) FROM ONLY test_bgw.conc_part; +SELECT count(*) FROM test_bgw.conc_part; + +DROP TABLE test_bgw.conc_part CASCADE; + + + +DROP SCHEMA test_bgw; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_cache_pranks.sql b/sql/pathman_cache_pranks.sql new file mode 100644 index 00000000..e3fe00d9 --- /dev/null +++ b/sql/pathman_cache_pranks.sql @@ -0,0 +1,122 @@ +\set VERBOSITY terse +-- is pathman (caches, in particular) strong enough to carry out this? + +SET search_path = 'public'; + +-- make sure nothing breaks on disable/enable when nothing was initialized yet +SET pg_pathman.enable = false; +SET pg_pathman.enable = true; + +-- wobble with create-drop ext: tests cached relids sanity +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = f; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = true; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +DROP EXTENSION pg_pathman; + +-- create it for further tests +CREATE EXTENSION pg_pathman; + +-- 079797e0d5 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 30); +SELECT create_range_partitions('part_test', 'val', 1, 10); +SELECT set_interval('part_test', 100); +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT drop_partitions('part_test'); +SELECT disable_pathman_for('part_test'); + +CREATE TABLE wrong_partition (LIKE part_test) INHERITS (part_test); +SELECT add_to_pathman_config('part_test', 'val', '10'); +SELECT add_to_pathman_config('part_test', 'val'); 
+ +DROP TABLE part_test CASCADE; +-- + +-- 85fc5ccf121 +CREATE TABLE part_test(val serial); +INSERT INTO part_test SELECT generate_series(1, 3000); +SELECT create_range_partitions('part_test', 'val', 1, 10); +SELECT append_range_partition('part_test'); +DELETE FROM part_test; +SELECT create_single_range_partition('part_test', NULL::INT4, NULL); /* not ok */ +DELETE FROM pathman_config WHERE partrel = 'part_test'::REGCLASS; +SELECT create_hash_partitions('part_test', 'val', 2, partition_names := ARRAY[]::TEXT[]); /* not ok */ + +DROP TABLE part_test CASCADE; +-- +-- +-- PGPRO-7870 +-- Added error for case executing prepared query after DROP/CREATE EXTENSION. +-- +-- DROP/CREATE extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); + +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; + +EXECUTE q(1); + +DEALLOCATE q; +DROP TABLE part_test CASCADE; + +-- DROP/CREATE disabled extension +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 898]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 10); + +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); + +SET pg_pathman.enable = f; +DROP EXTENSION pg_pathman; +CREATE EXTENSION pg_pathman; +SET pg_pathman.enable = t; + +EXECUTE q(1); + +DEALLOCATE q; +DROP TABLE part_test CASCADE; + +-- DROP/CREATE extension in autonomous transaction +CREATE TABLE part_test(a INT4 NOT NULL, b INT4); +PREPARE q(int4) AS SELECT * FROM part_test WHERE a > ALL (array[$1, 198]); +SELECT create_range_partitions('part_test', 'a', 1, 100, 2); + +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); +EXECUTE q(1); + +BEGIN; + BEGIN AUTONOMOUS; + DROP EXTENSION pg_pathman; + 
CREATE EXTENSION pg_pathman; + COMMIT; +COMMIT; + +EXECUTE q(1); + +DEALLOCATE q; +DROP TABLE part_test CASCADE; + +-- finalize +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_calamity.sql b/sql/pathman_calamity.sql index 881cebbd..ecc2c30f 100644 --- a/sql/pathman_calamity.sql +++ b/sql/pathman_calamity.sql @@ -1,5 +1,16 @@ -\set VERBOSITY terse +/* + * pathman_calamity.out and pathman_calamity_1.out differ only in that since + * 12 we get + * ERROR: invalid input syntax for type integer: "abc" + * instead of + * ERROR: invalid input syntax for integer: "15.6" + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ +\set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA calamity; @@ -7,7 +18,7 @@ CREATE SCHEMA calamity; /* call for coverage test */ set client_min_messages = ERROR; SELECT debug_capture(); -SELECT get_pathman_lib_version(); +SELECT pathman_version(); set client_min_messages = NOTICE; @@ -132,14 +143,14 @@ SELECT build_range_condition('calamity.part_test', 'val', 10, NULL); /* OK */ SELECT build_range_condition('calamity.part_test', 'val', NULL, 10); /* OK */ /* check function validate_interval_value() */ -SELECT validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value(NULL, 'expr', 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', NULL, 2, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon', 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH', NULL); /* not ok */ -SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon', NULL); /* not ok */ -SELECT validate_interval_value('pg_class', 'expr', 2, NULL, 'cooked_expr'); /* not ok */ -SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH', NULL); /* not ok */ +SELECT 
validate_interval_value(1::REGCLASS, 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value(NULL, 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', NULL, 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', NULL, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'relname', 1, 'HASH'); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, '1 mon'); /* not ok */ +SELECT validate_interval_value('pg_class', 'expr', 2, NULL); /* not ok */ +SELECT validate_interval_value('pg_class', 'EXPR', 1, 'HASH'); /* not ok */ /* check function validate_relname() */ SELECT validate_relname('calamity.part_test'); @@ -179,16 +190,6 @@ SELECT build_check_constraint_name('calamity.part_test'); /* OK */ SELECT build_check_constraint_name(0::REGCLASS); /* not ok */ SELECT build_check_constraint_name(NULL) IS NULL; -/* check function build_update_trigger_name() */ -SELECT build_update_trigger_name('calamity.part_test'); /* OK */ -SELECT build_update_trigger_name(0::REGCLASS); /* not ok */ -SELECT build_update_trigger_name(NULL) IS NULL; - -/* check function build_update_trigger_func_name() */ -SELECT build_update_trigger_func_name('calamity.part_test'); /* OK */ -SELECT build_update_trigger_func_name(0::REGCLASS); /* not ok */ -SELECT build_update_trigger_func_name(NULL) IS NULL; - /* check function build_sequence_name() */ SELECT build_sequence_name('calamity.part_test'); /* OK */ SELECT build_sequence_name(1::REGCLASS); /* not ok */ @@ -222,9 +223,6 @@ SELECT generate_range_bounds('1-jan-2017'::DATE, SELECT check_range_available(NULL, NULL::INT4, NULL); /* not ok */ SELECT check_range_available('pg_class', 1, 10); /* OK (not partitioned) */ -SELECT has_update_trigger(NULL); -SELECT has_update_trigger(0::REGCLASS); /* not ok */ - /* check invoke_on_partition_created_callback() */ CREATE FUNCTION calamity.dummy_cb(arg jsonb) RETURNS void AS $$ begin @@ -333,8 +331,8 @@ DROP TABLE 
calamity.test_range_oid CASCADE; /* check function merge_range_partitions() */ -SELECT merge_range_partitions('{pg_class}'); /* not ok */ -SELECT merge_range_partitions('{pg_class, pg_inherits}'); /* not ok */ +SELECT merge_range_partitions('pg_class'); /* not ok */ +SELECT merge_range_partitions('pg_class', 'pg_inherits'); /* not ok */ CREATE TABLE calamity.merge_test_a(val INT4 NOT NULL); CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); @@ -342,29 +340,38 @@ CREATE TABLE calamity.merge_test_b(val INT4 NOT NULL); SELECT create_range_partitions('calamity.merge_test_a', 'val', 1, 10, 2); SELECT create_range_partitions('calamity.merge_test_b', 'val', 1, 10, 2); -SELECT merge_range_partitions('{calamity.merge_test_a_1, - calamity.merge_test_b_1}'); /* not ok */ +SELECT merge_range_partitions('calamity.merge_test_a_1', + 'calamity.merge_test_b_1'); /* not ok */ DROP TABLE calamity.merge_test_a,calamity.merge_test_b CASCADE; +DROP DOMAIN calamity.test_domain; +DROP TABLE calamity.part_test CASCADE; +DROP TABLE calamity.part_ok CASCADE; +DROP TABLE calamity.hash_two_times CASCADE; +DROP TABLE calamity.to_be_disabled CASCADE; +DROP SCHEMA calamity; +DROP EXTENSION pg_pathman; -/* check function drop_triggers() */ -CREATE TABLE calamity.trig_test_tbl(val INT4 NOT NULL); -SELECT create_hash_partitions('calamity.trig_test_tbl', 'val', 2); -SELECT create_update_triggers('calamity.trig_test_tbl'); - -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; -SELECT drop_triggers('calamity.trig_test_tbl'); /* OK */ -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl'::REGCLASS; -SELECT count(*) FROM pg_trigger WHERE tgrelid = 'calamity.trig_test_tbl_1'::REGCLASS; +/* + * ------------------------------- + * Special tests (SET statement) + * ------------------------------- + */ -DROP TABLE calamity.trig_test_tbl CASCADE; +CREATE EXTENSION 
pg_pathman; +SET pg_pathman.enable = false; +SET pg_pathman.enable = true; +SET pg_pathman.enable = false; +RESET pg_pathman.enable; +RESET ALL; +BEGIN; ROLLBACK; +BEGIN ISOLATION LEVEL SERIALIZABLE; ROLLBACK; +BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; ROLLBACK; -DROP SCHEMA calamity CASCADE; DROP EXTENSION pg_pathman; @@ -381,9 +388,11 @@ CREATE EXTENSION pg_pathman; /* check that cache loading is lazy */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ /* Change this setting for code coverage */ SET pg_pathman.enable_bounds_cache = false; @@ -392,9 +401,11 @@ SET pg_pathman.enable_bounds_cache = false; CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ /* Restore this GUC */ SET pg_pathman.enable_bounds_cache = true; @@ -403,21 +414,26 @@ SET pg_pathman.enable_bounds_cache = true; CREATE 
TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ /* check that parents cache has been flushed after partition was dropped */ CREATE TABLE calamity.test_pathman_cache_stats(val NUMERIC NOT NULL); SELECT create_range_partitions('calamity.test_pathman_cache_stats', 'val', 1, 10, 10); EXPLAIN (COSTS OFF) SELECT * FROM calamity.test_pathman_cache_stats; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ SELECT drop_range_partition('calamity.test_pathman_cache_stats_1'); -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ DROP TABLE calamity.test_pathman_cache_stats CASCADE; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; /* OK */ +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* OK */ -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; @@ -456,5 +472,5 @@ EXPLAIN (COSTS OFF) SELECT * FROM calamity.survivor; /* OK */ DROP TABLE calamity.survivor CASCADE; -DROP SCHEMA calamity CASCADE; +DROP SCHEMA calamity; DROP EXTENSION pg_pathman; diff --git 
a/sql/pathman_callbacks.sql b/sql/pathman_callbacks.sql index 79325a2c..096a55ad 100644 --- a/sql/pathman_callbacks.sql +++ b/sql/pathman_callbacks.sql @@ -1,10 +1,11 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA callbacks; -/* Check callbacks */ + +/* callback #1 */ CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) RETURNS VOID AS $$ BEGIN @@ -12,15 +13,15 @@ BEGIN END $$ language plpgsql; - - -/* callback is in public namespace, must be schema-qualified */ +/* callback #2 */ CREATE OR REPLACE FUNCTION public.dummy_cb(args JSONB) RETURNS VOID AS $$ BEGIN END $$ language plpgsql; + + CREATE TABLE callbacks.abc(a serial, b int); SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); @@ -78,6 +79,7 @@ SELECT create_hash_partitions('callbacks.abc', 'a', 5); DROP TABLE callbacks.abc CASCADE; + /* test the temprary deletion of callback function */ CREATE TABLE callbacks.abc(a serial, b int); SELECT set_init_callback('callbacks.abc', @@ -85,18 +87,17 @@ SELECT set_init_callback('callbacks.abc', SELECT create_range_partitions('callbacks.abc', 'a', 1, 100, 2); INSERT INTO callbacks.abc VALUES (201, 0); /* +1 new partition */ + +BEGIN; DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); INSERT INTO callbacks.abc VALUES (301, 0); /* +0 new partitions (ERROR) */ -CREATE OR REPLACE FUNCTION callbacks.abc_on_part_created_callback(args JSONB) -RETURNS VOID AS $$ -BEGIN - RAISE WARNING 'callback arg: %', args::TEXT; -END -$$ language plpgsql; +ROLLBACK; + INSERT INTO callbacks.abc VALUES (301, 0); /* +1 new partition */ DROP TABLE callbacks.abc CASCADE; + /* more complex test using rotation of tables */ CREATE TABLE callbacks.abc(a INT4 NOT NULL); INSERT INTO callbacks.abc @@ -107,22 +108,22 @@ CREATE OR REPLACE FUNCTION callbacks.rotation_callback(params jsonb) RETURNS VOID AS $$ DECLARE - relation regclass; + relation regclass; parent_rel regclass; BEGIN parent_rel := 
concat(params->>'partition_schema', '.', params->>'parent')::regclass; - -- drop "old" partitions - FOR relation IN (SELECT partition FROM + -- drop "old" partitions + FOR relation IN (SELECT partition FROM (SELECT partition, range_min::INT4 FROM pathman_partition_list WHERE parent = parent_rel ORDER BY range_min::INT4 DESC OFFSET 4) t -- remain 4 last partitions ORDER BY range_min) - LOOP - RAISE NOTICE 'dropping partition %', relation; - PERFORM drop_range_partition(relation); - END LOOP; + LOOP + RAISE NOTICE 'dropping partition %', relation; + PERFORM drop_range_partition(relation); + END LOOP; END $$ LANGUAGE plpgsql; @@ -140,6 +141,11 @@ SELECT * FROM pathman_partition_list WHERE parent = 'callbacks.abc'::REGCLASS ORDER BY range_min::INT4; + + DROP TABLE callbacks.abc CASCADE; -DROP SCHEMA callbacks CASCADE; +DROP FUNCTION callbacks.abc_on_part_created_callback(jsonb); +DROP FUNCTION public.dummy_cb(jsonb); +DROP FUNCTION callbacks.rotation_callback(jsonb); +DROP SCHEMA callbacks; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_column_type.sql b/sql/pathman_column_type.sql index 47d38cc5..d3f16107 100644 --- a/sql/pathman_column_type.sql +++ b/sql/pathman_column_type.sql @@ -1,3 +1,8 @@ +/* + * In 9ce77d75c5a (>= 13) struct Var was changed, which caused the output + * of get_partition_cooked_key to change. + */ + \set VERBOSITY terse SET search_path = 'public'; @@ -15,21 +20,34 @@ SELECT create_range_partitions('test_column_type.test', 'val', 1, 10, 10); /* make sure that bounds and dispatch info has been cached */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; -/* change column's type (should flush caches) */ +/* + * Get parsed and analyzed expression. 
+ */ +CREATE FUNCTION get_cached_partition_cooked_key(REGCLASS) +RETURNS TEXT AS 'pg_pathman', 'get_cached_partition_cooked_key_pl' +LANGUAGE C STRICT; + +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); + +/* change column's type (should also flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; -/* check that parsed expression was cleared */ -SELECT partrel, cooked_expr FROM pathman_config; +/* check that correct expression has been built */ +SELECT get_partition_key_type('test_column_type.test'::REGCLASS); +SELECT get_partition_cooked_key('test_column_type.test'::REGCLASS); +SELECT get_cached_partition_cooked_key('test_column_type.test'::REGCLASS); +DROP FUNCTION get_cached_partition_cooked_key(REGCLASS); /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -/* check that expression has been built */ -SELECT partrel, cooked_expr FROM pathman_config; - -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -49,21 +67,24 @@ SELECT create_hash_partitions('test_column_type.test', 'id', 5); /* make sure that bounds and dispatch info has been cached */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* change column's type (should NOT work) */ ALTER TABLE test_column_type.test ALTER id TYPE NUMERIC; /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM 
pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* change column's type (should flush caches) */ ALTER TABLE test_column_type.test ALTER val TYPE NUMERIC; /* make sure that everything works properly */ SELECT * FROM test_column_type.test; -SELECT context, entries FROM pathman_cache_stats ORDER BY context; +SELECT context, entries FROM pathman_cache_stats + WHERE context != 'partition status cache' ORDER BY context; /* check insert dispatching */ INSERT INTO test_column_type.test VALUES (1); @@ -73,5 +94,5 @@ SELECT drop_partitions('test_column_type.test'); DROP TABLE test_column_type.test CASCADE; -DROP SCHEMA test_column_type CASCADE; +DROP SCHEMA test_column_type; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_cte.sql b/sql/pathman_cte.sql index 04af82f0..594c6db7 100644 --- a/sql/pathman_cte.sql +++ b/sql/pathman_cte.sql @@ -1,15 +1,17 @@ +/* + * Test simple CTE queries. + * Since 12 (608b167f9f), CTEs which are scanned once are no longer an + * optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_cte_1.out instead. 
+ */ + \set VERBOSITY terse SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_cte; - - -/* - * Test simple CTE queries - */ - CREATE TABLE test_cte.range_rel ( id INT4, dt TIMESTAMP NOT NULL, @@ -155,5 +157,6 @@ SELECT * FROM test; -DROP SCHEMA test_cte CASCADE; +DROP TABLE test_cte.recursive_cte_test_tbl CASCADE; +DROP SCHEMA test_cte; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_declarative.sql b/sql/pathman_declarative.sql new file mode 100644 index 00000000..eb12c295 --- /dev/null +++ b/sql/pathman_declarative.sql @@ -0,0 +1,50 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt DATE NOT NULL +); +CREATE TABLE test.r2 (LIKE test.range_rel); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); + +INSERT INTO test.range_rel (dt) +SELECT g FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) AS g; +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2015-01-01'::DATE, '1 month'::INTERVAL); + +SELECT * FROM pathman.pathman_partition_list; +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES IN ('2015-05-01', '2015-06-01'); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +ALTER TABLE test.range_rel ATTACH PARTITION test.r2 + FOR VALUES FROM ('2015-05-01') TO ('2015-06-01'); +SELECT * FROM pathman.pathman_partition_list; +\d+ test.r2; +ALTER TABLE test.range_rel DETACH PARTITION test.r2; +SELECT * FROM pathman.pathman_partition_list; +\d+ test.r2; + +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES IN ('2015-05-01', '2015-06-01'); +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2014-05-01') TO ('2015-06-01'); +CREATE TABLE test.r4 PARTITION OF test.range_rel + FOR VALUES FROM ('2015-06-01') TO 
('2016-01-01'); +\d+ test.r4; + +/* Note: PG-10 doesn't support ATTACH PARTITION ... DEFAULT */ +ALTER TABLE IF EXISTS test.nonexistent_table ATTACH PARTITION baz FOR VALUES IN (42); +ALTER TABLE IF EXISTS test.nonexistent_table DETACH PARTITION baz; + +DROP TABLE test.r2 CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_domains.sql b/sql/pathman_domains.sql index f6ee7076..105b2399 100644 --- a/sql/pathman_domains.sql +++ b/sql/pathman_domains.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA domains; @@ -40,5 +41,7 @@ SELECT * FROM pathman_partition_list ORDER BY "partition"::TEXT; -DROP SCHEMA domains CASCADE; +DROP TABLE domains.dom_table CASCADE; +DROP DOMAIN domains.dom_test CASCADE; +DROP SCHEMA domains; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_dropped_cols.sql b/sql/pathman_dropped_cols.sql new file mode 100644 index 00000000..2a128df2 --- /dev/null +++ b/sql/pathman_dropped_cols.sql @@ -0,0 +1,104 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA dropped_cols; + + +/* + * we should be able to manage tables with dropped columns + */ + +create table test_range(a int, b int, key int not null); + +alter table test_range drop column a; +select create_range_partitions('test_range', 'key', 1, 10, 2); + +alter table test_range drop column b; +select prepend_range_partition('test_range'); + +select * from pathman_partition_list order by parent, partition; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_1_check'; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_range_3_check'; + +drop table test_range cascade; + + +create table test_hash(a int, b int, key int not null); + +alter table test_hash drop column a; +select create_hash_partitions('test_hash', 
'key', 3); + +alter table test_hash drop column b; +create table test_dummy (like test_hash); +select replace_hash_partition('test_hash_2', 'test_dummy', true); + +select * from pathman_partition_list order by parent, partition; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_hash_1_check'; +select pg_get_constraintdef(oid, true) from pg_constraint where conname = 'pathman_test_dummy_check'; +drop table test_hash cascade; + +-- Yury Smirnov case +CREATE TABLE root_dict ( + id BIGSERIAL PRIMARY KEY NOT NULL, + root_id BIGINT NOT NULL, + start_date DATE, + num TEXT, + main TEXT, + dict_code TEXT, + dict_name TEXT, + edit_num TEXT, + edit_date DATE, + sign CHAR(4) +); + +CREATE INDEX "root_dict_root_id_idx" ON "root_dict" ("root_id"); + +DO +$$ +DECLARE + r RECORD; +BEGIN + FOR r IN SELECT * FROM generate_series(1, 3) r + LOOP + FOR d IN 1..2 LOOP + INSERT INTO root_dict (root_id, start_date, num, main, dict_code, dict_name, edit_num, edit_date, sign) VALUES + (r.r, '2010-10-10'::date, 'num_' || d, (d % 2) + 1, 'code_' || d, 'name_' || d, NULL, NULL, '2014'); + END LOOP; + END LOOP; +END +$$; + +ALTER TABLE root_dict ADD COLUMN dict_id BIGINT DEFAULT 3; +ALTER TABLE root_dict DROP COLUMN dict_code, + DROP COLUMN dict_name, + DROP COLUMN sign; + +SELECT create_hash_partitions('root_dict' :: REGCLASS, + 'root_id', + 3, + true); +VACUUM FULL ANALYZE "root_dict"; +SELECT set_enable_parent('root_dict' :: REGCLASS, FALSE); + +PREPARE getbyroot AS +SELECT + id, root_id, start_date, num, main, edit_num, edit_date, dict_id +FROM root_dict +WHERE root_id = $1; + +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); + +-- errors usually start here +EXECUTE getbyroot(2); +EXECUTE getbyroot(2); +EXPLAIN (COSTS OFF) EXECUTE getbyroot(2); + +DEALLOCATE getbyroot; +DROP TABLE root_dict CASCADE; +DROP SCHEMA dropped_cols; +DROP EXTENSION pg_pathman; diff --git 
a/sql/pathman_expressions.sql b/sql/pathman_expressions.sql index 1c7f4dbe..bf29f896 100644 --- a/sql/pathman_expressions.sql +++ b/sql/pathman_expressions.sql @@ -1,3 +1,13 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_expressions_2.out is the updated version. + */ + \set VERBOSITY terse SET search_path = 'public'; @@ -168,17 +178,9 @@ INSERT INTO test_exprs.range_rel_6 (dt, txt) VALUES ('2020-01-01'::DATE, md5('as SELECT COUNT(*) FROM test_exprs.range_rel_6; EXPLAIN (COSTS OFF) SELECT * FROM test_exprs.range_rel WHERE (AGE(dt, '2000-01-01'::DATE)) = '18 years'::interval; -SELECT create_update_triggers('test_exprs.range_rel'); -SELECT COUNT(*) FROM test_exprs.range_rel; -SELECT COUNT(*) FROM test_exprs.range_rel_1; -SELECT COUNT(*) FROM test_exprs.range_rel_2; -UPDATE test_exprs.range_rel SET dt = '2016-12-01' WHERE dt >= '2015-10-10' AND dt <= '2017-10-10'; - -/* counts in partitions should be changed */ -SELECT COUNT(*) FROM test_exprs.range_rel; -SELECT COUNT(*) FROM test_exprs.range_rel_1; -SELECT COUNT(*) FROM test_exprs.range_rel_2; - - -DROP SCHEMA test_exprs CASCADE; +DROP TABLE test_exprs.canary CASCADE; +DROP TABLE test_exprs.canary_copy CASCADE; +DROP TABLE test_exprs.range_rel CASCADE; +DROP TABLE test_exprs.hash_rel CASCADE; +DROP SCHEMA test_exprs; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_foreign_keys.sql b/sql/pathman_foreign_keys.sql index 392b3a7a..74dee25f 100644 --- a/sql/pathman_foreign_keys.sql +++ b/sql/pathman_foreign_keys.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA fkeys; @@ -51,5 +52,7 @@ DROP TABLE fkeys.messages, fkeys.replies 
CASCADE; -DROP SCHEMA fkeys CASCADE; +DROP TABLE fkeys.test_fkey CASCADE; +DROP TABLE fkeys.test_ref CASCADE; +DROP SCHEMA fkeys; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_gaps.sql b/sql/pathman_gaps.sql new file mode 100644 index 00000000..129b210c --- /dev/null +++ b/sql/pathman_gaps.sql @@ -0,0 +1,145 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. + */ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA gaps; + + + +CREATE TABLE gaps.test_1(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_1', 'val', 1, 10, 3); +DROP TABLE gaps.test_1_2; + +CREATE TABLE gaps.test_2(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_2', 'val', 1, 10, 5); +DROP TABLE gaps.test_2_3; + +CREATE TABLE gaps.test_3(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_3', 'val', 1, 10, 8); +DROP TABLE gaps.test_3_4; + +CREATE TABLE gaps.test_4(val INT8 NOT NULL); +SELECT create_range_partitions('gaps.test_4', 'val', 1, 10, 11); +DROP TABLE gaps.test_4_4; +DROP TABLE gaps.test_4_5; + + + +/* Check existing partitions */ +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val = 21; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val < 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val <= 21; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 
WHERE val >= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 16; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_1 WHERE val >= 21; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val = 31; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val <= 41; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 11; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 26; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_2 WHERE val >= 31; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val = 41; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 36; +EXPLAIN (COSTS OFF) 
SELECT * FROM gaps.test_3 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val < 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val <= 51; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val > 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_3 WHERE val >= 41; + + +/* Pivot values */ +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val = 51; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val < 61; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val <= 61; + +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 21; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 21; +EXPLAIN (COSTS OFF) 
SELECT * FROM gaps.test_4 WHERE val > 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 31; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 36; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 41; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 46; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val > 51; +EXPLAIN (COSTS OFF) SELECT * FROM gaps.test_4 WHERE val >= 51; + + + +DROP TABLE gaps.test_1 CASCADE; +DROP TABLE gaps.test_2 CASCADE; +DROP TABLE gaps.test_3 CASCADE; +DROP TABLE gaps.test_4 CASCADE; +DROP SCHEMA gaps; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_hashjoin.sql b/sql/pathman_hashjoin.sql new file mode 100644 index 00000000..620dee5f --- /dev/null +++ b/sql/pathman_hashjoin.sql @@ -0,0 +1,56 @@ +/* + * pathman_hashjoin_1.out and pathman_hashjoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_hashjoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +CREATE INDEX ON test.range_rel (dt); +INSERT INTO test.range_rel (dt, txt) + SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT pathman.create_range_partitions('test.range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE test.num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT pathman.create_range_partitions('test.num_range_rel', 'id', 0, 1000, 4); +INSERT INTO test.num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +SET pg_pathman.enable_runtimeappend = OFF; +SET pg_pathman.enable_runtimemergeappend = OFF; +VACUUM; + +/* + * Hash join + */ +SET enable_indexscan = ON; +SET enable_seqscan = OFF; +SET enable_nestloop = OFF; +SET enable_hashjoin = ON; +SET enable_mergejoin = OFF; + +EXPLAIN (COSTS OFF) +SELECT * FROM test.range_rel j1 +JOIN test.range_rel j2 on j2.id = j1.id +JOIN test.num_range_rel j3 on j3.id = j1.id +WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; + +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_inserts.sql b/sql/pathman_inserts.sql index ff46c848..aa5b6c1c 100644 --- a/sql/pathman_inserts.sql +++ b/sql/pathman_inserts.sql @@ -1,3 +1,8 @@ +/* + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ */ + \set VERBOSITY terse SET search_path = 'public'; @@ -163,6 +168,33 @@ FROM generate_series(-2, 130, 5) i RETURNING e * 2, b, tableoid::regclass; +/* test EXPLAIN (VERBOSE) - for PartitionFilter's targetlists */ +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT i, i, i +FROM generate_series(1, 10) i +RETURNING e * 2, b, tableoid::regclass; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (d, e) SELECT i, i +FROM generate_series(1, 10) i; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT i +FROM generate_series(1, 10) i; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d, e) SELECT b, d, e +FROM test_inserts.storage; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b, d) SELECT b, d +FROM test_inserts.storage; + +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO test_inserts.storage (b) SELECT b +FROM test_inserts.storage; + + /* test gap case (missing partition in between) */ CREATE TABLE test_inserts.test_gap(val INT NOT NULL); INSERT INTO test_inserts.test_gap SELECT generate_series(1, 30); @@ -172,5 +204,28 @@ INSERT INTO test_inserts.test_gap VALUES(15); /* not ok */ DROP TABLE test_inserts.test_gap CASCADE; -DROP SCHEMA test_inserts CASCADE; +/* test a few "special" ONLY queries used in pg_repack */ +CREATE TABLE test_inserts.test_special_only(val INT NOT NULL); +INSERT INTO test_inserts.test_special_only SELECT generate_series(1, 30); +SELECT create_hash_partitions('test_inserts.test_special_only', 'val', 4); + +/* create table as select only */ +CREATE TABLE test_inserts.special_1 AS SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_1; +DROP TABLE test_inserts.special_1; + +/* insert into ... 
select only */ +CREATE TABLE test_inserts.special_2 AS SELECT * FROM ONLY test_inserts.test_special_only WITH NO DATA; +INSERT INTO test_inserts.special_2 SELECT * FROM ONLY test_inserts.test_special_only; +SELECT count(*) FROM test_inserts.special_2; +DROP TABLE test_inserts.special_2; + +DROP TABLE test_inserts.test_special_only CASCADE; + + +DROP TABLE test_inserts.storage CASCADE; +DROP FUNCTION test_inserts.set_triggers(jsonb); +DROP FUNCTION test_inserts.print_cols_before_change(); +DROP FUNCTION test_inserts.print_cols_after_change(); +DROP SCHEMA test_inserts; DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_interval.sql b/sql/pathman_interval.sql index 59393ca4..3a457e7a 100644 --- a/sql/pathman_interval.sql +++ b/sql/pathman_interval.sql @@ -1,5 +1,6 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA test_interval; @@ -167,5 +168,5 @@ DROP TABLE test_interval.abc CASCADE; -DROP SCHEMA test_interval CASCADE; +DROP SCHEMA test_interval; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_join_clause.sql b/sql/pathman_join_clause.sql index 90287201..aa30b0b8 100644 --- a/sql/pathman_join_clause.sql +++ b/sql/pathman_join_clause.sql @@ -1,5 +1,9 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_gaps_1.out is the updated version. 
+ */ \set VERBOSITY terse - +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; @@ -102,7 +106,11 @@ WHERE test.parent.owner_id = 3 and test.parent.id IN (3, 4); -DROP SCHEMA test CASCADE; +DROP TABLE test.child CASCADE; +DROP TABLE test.child_nopart CASCADE; +DROP TABLE test.mytbl CASCADE; +DROP TABLE test.fk CASCADE; +DROP TABLE test.parent CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; - +DROP SCHEMA pathman; diff --git a/sql/pathman_lateral.sql b/sql/pathman_lateral.sql index 49dee604..d5def38c 100644 --- a/sql/pathman_lateral.sql +++ b/sql/pathman_lateral.sql @@ -1,3 +1,12 @@ +/* + * Sometimes join selectivity improvements patches in pgpro force nested loop + * members swap -- in pathman_lateral_1.out and pathman_lateral_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. + */ + + \set VERBOSITY terse SET search_path = 'public'; @@ -36,5 +45,6 @@ set enable_mergejoin = on; -DROP SCHEMA test_lateral CASCADE; +DROP TABLE test_lateral.data CASCADE; +DROP SCHEMA test_lateral; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_mergejoin.sql b/sql/pathman_mergejoin.sql index 90bf3166..d1084375 100644 --- a/sql/pathman_mergejoin.sql +++ b/sql/pathman_mergejoin.sql @@ -1,3 +1,16 @@ +/* + * pathman_mergejoin_1.out and pathman_mergejoin_2.out seem to deal with pgpro's + * different behaviour. 8edd0e794 (>= 12) Append nodes with single subplan + * are eliminated, hence pathman_mergejoin_3.out + * + * Since 55a1954da16 and 6ef77cf46e8 (>= 13) output of EXPLAIN was changed, + * now it includes aliases for inherited tables. 
+ * + * --------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * --------------------------------------------- + */ + \set VERBOSITY terse SET search_path = 'public'; @@ -35,6 +48,9 @@ SET enable_hashjoin = OFF; SET enable_nestloop = OFF; SET enable_mergejoin = ON; +SET enable_indexscan = ON; +SET enable_seqscan = OFF; + EXPLAIN (COSTS OFF) SELECT * FROM test.range_rel j1 JOIN test.range_rel j2 on j2.id = j1.id @@ -43,8 +59,10 @@ WHERE j1.dt < '2015-03-01' AND j2.dt >= '2015-02-01' ORDER BY j2.dt; SET enable_hashjoin = ON; SET enable_nestloop = ON; +SET enable_seqscan = ON; - -DROP SCHEMA test CASCADE; +DROP TABLE test.num_range_rel CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; DROP EXTENSION pg_pathman; -DROP SCHEMA pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_only.sql b/sql/pathman_only.sql index e2813ea6..68dc4ca1 100644 --- a/sql/pathman_only.sql +++ b/sql/pathman_only.sql @@ -2,6 +2,32 @@ * --------------------------------------------- * NOTE: This test behaves differenly on PgPro * --------------------------------------------- + * + * -------------------- + * pathman_only_1.sql + * -------------------- + * Since 608b167f9f in PostgreSQL 12, CTEs which are scanned once are no longer + * an optimization fence, which changes practically all plans here. There is + * an option to forcibly make them MATERIALIZED, but we also need to run tests + * on older versions, so create pathman_only_1.out instead. + * + * -------------------- + * pathman_only_2.sql + * -------------------- + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13, output of EXPLAIN was + * changed, now it includes aliases for inherited tables. + * + * -------------------- + * pathman_only_3.sql + * -------------------- + * Since a5fc46414de in PostgreSQL 16, the order of the operands was changed, + * which affected the output of the "Prune by" in EXPLAIN. 
+ * + * -------------------- + * pathman_only_4.sql + * -------------------- + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. */ \set VERBOSITY terse @@ -66,5 +92,6 @@ WHERE val = (SELECT val FROM ONLY test_only.from_only_test -DROP SCHEMA test_only CASCADE; +DROP TABLE test_only.from_only_test CASCADE; +DROP SCHEMA test_only; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_param_upd_del.sql b/sql/pathman_param_upd_del.sql index 98be1179..0f3030e7 100644 --- a/sql/pathman_param_upd_del.sql +++ b/sql/pathman_param_upd_del.sql @@ -23,6 +23,17 @@ EXPLAIN (COSTS OFF) EXECUTE upd(11); DEALLOCATE upd; +PREPARE upd(INT4) AS UPDATE param_upd_del.test SET val = val + 1 WHERE key = ($1 + 3) * 2; +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(5); +EXPLAIN (COSTS OFF) EXECUTE upd(6); +DEALLOCATE upd; + + PREPARE del(INT4) AS DELETE FROM param_upd_del.test WHERE key = $1; EXPLAIN (COSTS OFF) EXECUTE del(10); EXPLAIN (COSTS OFF) EXECUTE del(10); @@ -34,5 +45,6 @@ EXPLAIN (COSTS OFF) EXECUTE del(11); DEALLOCATE del; -DROP SCHEMA param_upd_del CASCADE; +DROP TABLE param_upd_del.test CASCADE; +DROP SCHEMA param_upd_del; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_permissions.sql b/sql/pathman_permissions.sql index 43bf6ca6..3e2cf92a 100644 --- a/sql/pathman_permissions.sql +++ b/sql/pathman_permissions.sql @@ -1,115 +1,140 @@ \set VERBOSITY terse +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA permissions; -CREATE ROLE user1 LOGIN; -CREATE ROLE user2 LOGIN; +CREATE ROLE pathman_user1 LOGIN; +CREATE ROLE pathman_user2 LOGIN; -GRANT USAGE, CREATE ON SCHEMA permissions TO user1; -GRANT USAGE, CREATE ON SCHEMA permissions TO user2; +GRANT USAGE, CREATE ON SCHEMA permissions TO pathman_user1; +GRANT USAGE, 
CREATE ON SCHEMA permissions TO pathman_user2; /* Switch to #1 */ -SET ROLE user1; -CREATE TABLE permissions.user1_table(id serial, a int); -INSERT INTO permissions.user1_table SELECT g, g FROM generate_series(1, 20) as g; +SET ROLE pathman_user1; +CREATE TABLE permissions.pathman_user1_table(id serial, a int); +INSERT INTO permissions.pathman_user1_table SELECT g, g FROM generate_series(1, 20) as g; /* Should fail (can't SELECT) */ -SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); - -/* Grant SELECT to user2 */ -SET ROLE user1; -GRANT SELECT ON permissions.user1_table TO user2; +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; + +/* Grant SELECT to pathman_user2 */ +SET ROLE pathman_user1; +GRANT SELECT ON permissions.pathman_user1_table TO pathman_user2; /* Should fail (don't own parent) */ -SET ROLE user2; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +SET ROLE pathman_user2; +DO $$ +BEGIN + SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; /* Should be ok */ -SET ROLE user1; -SELECT create_range_partitions('permissions.user1_table', 'id', 1, 10, 2); +SET ROLE pathman_user1; +SELECT create_range_partitions('permissions.pathman_user1_table', 'id', 1, 10, 2); /* Should be able to see */ -SET ROLE user2; +SET ROLE pathman_user2; SELECT * FROM pathman_config; SELECT * FROM pathman_config_params; /* Should fail */ -SET ROLE user2; -SELECT set_enable_parent('permissions.user1_table', true); -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user2; +SELECT set_enable_parent('permissions.pathman_user1_table', true); +SELECT set_auto('permissions.pathman_user1_table', false); /* Should 
fail */ -SET ROLE user2; +SET ROLE pathman_user2; DELETE FROM pathman_config -WHERE partrel = 'permissions.user1_table'::regclass; +WHERE partrel = 'permissions.pathman_user1_table'::regclass; /* No rights to insert, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0); +SET ROLE pathman_user2; +DO $$ +BEGIN + INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; /* No rights to create partitions (need INSERT privilege) */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); -/* Allow user2 to create partitions */ -SET ROLE user1; -GRANT INSERT ON permissions.user1_table TO user2; -GRANT UPDATE(a) ON permissions.user1_table TO user2; /* per-column ACL */ +/* Allow pathman_user2 to create partitions */ +SET ROLE pathman_user1; +GRANT INSERT ON permissions.pathman_user1_table TO pathman_user2; +GRANT UPDATE(a) ON permissions.pathman_user1_table TO pathman_user2; /* per-column ACL */ /* Should be able to prepend a partition */ -SET ROLE user2; -SELECT prepend_range_partition('permissions.user1_table'); +SET ROLE pathman_user2; +SELECT prepend_range_partition('permissions.pathman_user1_table'); SELECT attname, attacl FROM pg_attribute WHERE attrelid = (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_min::int ASC /* prepend */ LIMIT 1) ORDER BY attname; /* check ACL for each column */ /* Have rights, should be ok (parent's ACL is shared by new children) */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (35, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (35, 0) RETURNING *; SELECT relname, relacl FROM 
pg_class WHERE oid = ANY (SELECT "partition" FROM pathman_partition_list - WHERE parent = 'permissions.user1_table'::REGCLASS + WHERE parent = 'permissions.pathman_user1_table'::REGCLASS ORDER BY range_max::int DESC /* append */ LIMIT 3) -ORDER BY relname; /* we also check ACL for "user1_table_2" */ +ORDER BY relname; /* we also check ACL for "pathman_user1_table_2" */ /* Try to drop partition, should fail */ -SELECT drop_range_partition('permissions.user1_table_4'); +DO $$ +BEGIN + SELECT drop_range_partition('permissions.pathman_user1_table_4'); +EXCEPTION + WHEN insufficient_privilege THEN + RAISE NOTICE 'Insufficient priviliges'; +END$$; /* Disable automatic partition creation */ -SET ROLE user1; -SELECT set_auto('permissions.user1_table', false); +SET ROLE pathman_user1; +SELECT set_auto('permissions.pathman_user1_table', false); /* Partition creation, should fail */ -SET ROLE user2; -INSERT INTO permissions.user1_table (id, a) VALUES (55, 0) RETURNING *; +SET ROLE pathman_user2; +INSERT INTO permissions.pathman_user1_table (id, a) VALUES (55, 0) RETURNING *; /* Finally drop partitions */ -SET ROLE user1; -SELECT drop_partitions('permissions.user1_table'); +SET ROLE pathman_user1; +SELECT drop_partitions('permissions.pathman_user1_table'); /* Switch to #2 */ -SET ROLE user2; +SET ROLE pathman_user2; /* Test ddl event trigger */ -CREATE TABLE permissions.user2_table(id serial); -SELECT create_hash_partitions('permissions.user2_table', 'id', 3); -INSERT INTO permissions.user2_table SELECT generate_series(1, 30); -SELECT drop_partitions('permissions.user2_table'); +CREATE TABLE permissions.pathman_user2_table(id serial); +SELECT create_hash_partitions('permissions.pathman_user2_table', 'id', 3); +INSERT INTO permissions.pathman_user2_table SELECT generate_series(1, 30); +SELECT drop_partitions('permissions.pathman_user2_table'); /* Switch to #1 */ -SET ROLE user1; +SET ROLE pathman_user1; CREATE TABLE permissions.dropped_column(a int, val int not null, b int, c 
int); INSERT INTO permissions.dropped_column SELECT i,i,i,i FROM generate_series(1, 30) i; -GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO user2; +GRANT SELECT(val), INSERT(val) ON permissions.dropped_column TO pathman_user2; SELECT create_range_partitions('permissions.dropped_column', 'val', 1, 10); @@ -143,11 +168,11 @@ DROP TABLE permissions.dropped_column CASCADE; /* Finally reset user */ RESET ROLE; -DROP OWNED BY user1; -DROP OWNED BY user2; -DROP USER user1; -DROP USER user2; +DROP OWNED BY pathman_user1; +DROP OWNED BY pathman_user2; +DROP USER pathman_user1; +DROP USER pathman_user2; -DROP SCHEMA permissions CASCADE; +DROP SCHEMA permissions; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_deletes.sql b/sql/pathman_rebuild_deletes.sql new file mode 100644 index 00000000..1af6b61a --- /dev/null +++ b/sql/pathman_rebuild_deletes.sql @@ -0,0 +1,65 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_deletes; + + +/* + * Test DELETEs on a partition with different TupleDescriptor. 
+ */ + +/* create partitioned table */ +CREATE TABLE test_deletes.test(a FLOAT4, val INT4 NOT NULL, b FLOAT8); +INSERT INTO test_deletes.test SELECT i, i, i FROM generate_series(1, 100) AS i; +SELECT create_range_partitions('test_deletes.test', 'val', 1, 10); + +/* drop column 'a' */ +ALTER TABLE test_deletes.test DROP COLUMN a; + +/* append new partition */ +SELECT append_range_partition('test_deletes.test'); +INSERT INTO test_deletes.test_11 (val, b) VALUES (101, 10); + + +VACUUM ANALYZE; + + +/* tuple descs are the same */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 1; +DELETE FROM test_deletes.test WHERE val = 1 RETURNING *, tableoid::REGCLASS; + + +/* tuple descs are different */ +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test WHERE val = 101; +DELETE FROM test_deletes.test WHERE val = 101 RETURNING *, tableoid::REGCLASS; + +CREATE TABLE test_deletes.test_dummy (val INT4); + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND val = ANY (TABLE test_deletes.test_dummy) +RETURNING *, tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test t1 +USING test_deletes.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) DELETE FROM test_deletes.test +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + +DROP TABLE test_deletes.test_dummy; + + + +DROP TABLE test_deletes.test CASCADE; +DROP SCHEMA test_deletes; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rebuild_updates.sql b/sql/pathman_rebuild_updates.sql index ec4924ea..fbbbcbba 100644 --- a/sql/pathman_rebuild_updates.sql +++ b/sql/pathman_rebuild_updates.sql @@ -1,3 +1,10 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on < 11 because planner now turns + * Row(Const, Const) into just Const of record type, apparently since 3decd150 + * ------------------------------------------- + */ + \set VERBOSITY terse SET search_path = 
'public'; @@ -22,6 +29,9 @@ SELECT append_range_partition('test_updates.test'); INSERT INTO test_updates.test_11 (val, b) VALUES (101, 10); +VACUUM ANALYZE; + + /* tuple descs are the same */ EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 1; UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS; @@ -31,7 +41,64 @@ UPDATE test_updates.test SET b = 0 WHERE val = 1 RETURNING *, tableoid::REGCLASS EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 WHERE val = 101; UPDATE test_updates.test SET b = 0 WHERE val = 101 RETURNING *, tableoid::REGCLASS; +CREATE TABLE test_updates.test_dummy (val INT4); + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET val = val + 1 +WHERE val = 101 AND val = ANY (TABLE test_updates.test_dummy) +RETURNING *, tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) UPDATE test_updates.test t1 SET b = 0 +FROM test_updates.test_dummy t2 +WHERE t1.val = 101 AND t1.val = t2.val +RETURNING t1.*, t1.tableoid::REGCLASS; + +EXPLAIN (COSTS OFF) UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, 8) +RETURNING *, tableoid::REGCLASS; + +/* execute this one */ +UPDATE test_updates.test SET b = 0 +WHERE val = 101 AND test >= (100, -1) +RETURNING test; + +DROP TABLE test_updates.test_dummy; + + +/* cross-partition updates (& different tuple descs) */ +TRUNCATE test_updates.test; +SET pg_pathman.enable_partitionrouter = ON; + +SELECT *, (select count(*) from pg_attribute where attrelid = partition) as columns +FROM pathman_partition_list +ORDER BY range_min::int, range_max::int; + +INSERT INTO test_updates.test VALUES (105, 105); +UPDATE test_updates.test SET val = 106 WHERE val = 105 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = 115 WHERE val = 106 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = 95 WHERE val = 115 RETURNING *, tableoid::REGCLASS; +UPDATE test_updates.test SET val = -1 WHERE val = 95 RETURNING *, tableoid::REGCLASS; + + +/* basic check for 
'ALTER TABLE ... ADD COLUMN'; PGPRO-5113 */ +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x varchar; +/* no error here: */ +select * from test_updates.test_5113 where val = 11; +drop table test_updates.test_5113 cascade; + +create table test_updates.test_5113(val int4 not null); +insert into test_updates.test_5113 values (1); +select create_range_partitions('test_updates.test_5113', 'val', 1, 10); +update test_updates.test_5113 set val = 11 where val = 1; +alter table test_updates.test_5113 add column x int8; +/* no extra data in column 'x' here: */ +select * from test_updates.test_5113 where val = 11; +drop table test_updates.test_5113 cascade; -DROP SCHEMA test_updates CASCADE; +DROP TABLE test_updates.test CASCADE; +DROP SCHEMA test_updates; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_rowmarks.sql b/sql/pathman_rowmarks.sql index 72e40b8e..8847b80c 100644 --- a/sql/pathman_rowmarks.sql +++ b/sql/pathman_rowmarks.sql @@ -1,6 +1,37 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on PgPro + * ------------------------------------------- + * + * ------------------------ + * pathman_rowmarks_1.sql + * ------------------------ + * Since PostgreSQL 9.5, output of EXPLAIN was changed. + * + * ------------------------ + * pathman_rowmarks_2.sql + * ------------------------ + * Since 8edd0e794 in PostgreSQL 12, append nodes with single subplan are + * eliminated, causing different output. + * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since 55a1954da16 and 6ef77cf46e8 in PostgreSQL 13 output of EXPLAIN was + * changed, now it includes aliases for inherited tables. 
+ * + * ------------------------ + * pathman_rowmarks_3.sql + * ------------------------ + * Since fd0398fcb09 in PostgreSQL 17, output of EXPLAIN was + * changed, now it displays SubPlan nodes and output parameters. + */ +SET search_path = 'public'; CREATE EXTENSION pg_pathman; CREATE SCHEMA rowmarks; + + CREATE TABLE rowmarks.first(id int NOT NULL); CREATE TABLE rowmarks.second(id int NOT NULL); @@ -10,6 +41,10 @@ INSERT INTO rowmarks.second SELECT generate_series(1, 10); SELECT create_hash_partitions('rowmarks.first', 'id', 5); + +VACUUM ANALYZE; + + /* Not partitioned */ SELECT * FROM rowmarks.second ORDER BY id FOR UPDATE; @@ -56,6 +91,68 @@ WHERE id = (SELECT id FROM rowmarks.second FOR UPDATE) FOR SHARE; +/* JOIN (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + +/* JOIN (execution) */ +SELECT * FROM rowmarks.first +JOIN rowmarks.second USING(id) +ORDER BY id +FOR UPDATE; + +/* ONLY (plan) */ +EXPLAIN (COSTS OFF) +SELECT * FROM ONLY rowmarks.first FOR SHARE; + +/* ONLY (execution) */ +SELECT * FROM ONLY rowmarks.first FOR SHARE; + +/* Check updates (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); +EXPLAIN (COSTS OFF) +UPDATE rowmarks.second SET id = 2 +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1) +RETURNING *, tableoid::regclass; +SET enable_hashjoin = t; +SET enable_mergejoin = t; + +/* Check updates (execution) */ +UPDATE rowmarks.second SET id = 1 +WHERE 
rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2) +RETURNING *, tableoid::regclass; + +/* Check deletes (plan) */ +SET enable_hashjoin = f; /* Hash Semi Join on 10 vs Hash Join on 9.6 */ +SET enable_mergejoin = f; /* Merge Semi Join on 10 vs Merge Join on 9.6 */ +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1); +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id < 1); +EXPLAIN (COSTS OFF) +DELETE FROM rowmarks.second +WHERE rowmarks.second.id IN (SELECT id FROM rowmarks.first WHERE id = 1 OR id = 2); +SET enable_hashjoin = t; +SET enable_mergejoin = t; + + -DROP SCHEMA rowmarks CASCADE; +DROP TABLE rowmarks.first CASCADE; +DROP TABLE rowmarks.second CASCADE; +DROP SCHEMA rowmarks; DROP EXTENSION pg_pathman; diff --git a/sql/pathman_runtime_nodes.sql b/sql/pathman_runtime_nodes.sql index e5cf17a5..bf917d88 100644 --- a/sql/pathman_runtime_nodes.sql +++ b/sql/pathman_runtime_nodes.sql @@ -1,5 +1,5 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE SCHEMA pathman; CREATE EXTENSION pg_pathman SCHEMA pathman; CREATE SCHEMA test; @@ -63,7 +63,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -106,7 +105,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -140,7 +138,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -180,7 +177,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_mergejoin = off set enable_hashjoin = off; @@ -242,7 +238,6 @@ begin return 'ok'; end; $$ language plpgsql -set pg_pathman.enable = true set enable_hashjoin = off set enable_mergejoin = off; @@ -312,6 +307,15 @@ join (select * from 
test.run_values limit 4) as t2 on t1.id = t2.val; select count(*) = 0 from pathman.pathman_partition_list where parent = 'test.runtime_test_4'::regclass and coalesce(range_min::int, 1) < 0; +/* RuntimeAppend (check that dropped columns don't break tlists) */ +create table test.dropped_cols(val int4 not null); +select pathman.create_hash_partitions('test.dropped_cols', 'val', 4); +insert into test.dropped_cols select generate_series(1, 100); +alter table test.dropped_cols add column new_col text; /* add column */ +alter table test.dropped_cols drop column new_col; /* drop column! */ +explain (costs off) select * from generate_series(1, 10) f(id), lateral (select count(1) FILTER (WHERE true) from test.dropped_cols where val = f.id) c; +drop table test.dropped_cols cascade; + set enable_hashjoin = off; set enable_mergejoin = off; @@ -322,7 +326,47 @@ set enable_hashjoin = on; set enable_mergejoin = on; -DROP SCHEMA test CASCADE; -DROP EXTENSION pg_pathman CASCADE; -DROP SCHEMA pathman CASCADE; +DROP TABLE test.vals CASCADE; +DROP TABLE test.category CASCADE; +DROP TABLE test.run_values CASCADE; +DROP TABLE test.runtime_test_1 CASCADE; +DROP TABLE test.runtime_test_2 CASCADE; +DROP TABLE test.runtime_test_3 CASCADE; +DROP TABLE test.runtime_test_4 CASCADE; +DROP FUNCTION test.pathman_assert(bool, text); +DROP FUNCTION test.pathman_equal(text, text, text); +DROP FUNCTION test.pathman_test(text); +DROP FUNCTION test.pathman_test_1(); +DROP FUNCTION test.pathman_test_2(); +DROP FUNCTION test.pathman_test_3(); +DROP FUNCTION test.pathman_test_4(); +DROP FUNCTION test.pathman_test_5(); +DROP SCHEMA test; +-- +-- +-- PGPRO-7928 +-- Variable pg_pathman.enable must be called before any query. 
+-- +CREATE TABLE part_test (val int NOT NULL); +SELECT pathman.create_hash_partitions('part_test', 'val', 2, partition_names := array['part_test_1','pg_pathman']); +CREATE OR REPLACE FUNCTION part_test_trigger() RETURNS TRIGGER AS $$ +BEGIN + RAISE NOTICE '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + IF TG_OP::text = 'DELETE'::text then + SET pg_pathman.enable = f; + RETURN new; + END IF; +END; +$$ LANGUAGE PLPGSQL; +SET pg_pathman.enable_partitionrouter = t; +CREATE TRIGGER ad AFTER DELETE ON part_test_1 FOR EACH ROW EXECUTE PROCEDURE part_test_trigger (); +INSERT INTO part_test VALUES (1); +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; +UPDATE part_test SET val = val + 1 RETURNING *, tableoid::regclass; + +RESET pg_pathman.enable_partitionrouter; +DROP TABLE part_test CASCADE; +DROP FUNCTION part_test_trigger(); +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_subpartitions.sql b/sql/pathman_subpartitions.sql new file mode 100644 index 00000000..5515874c --- /dev/null +++ b/sql/pathman_subpartitions.sql @@ -0,0 +1,169 @@ +/* + * Since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_subpartitions_1.out is the updated version. 
+ */ + +\set VERBOSITY terse + +CREATE EXTENSION pg_pathman; +CREATE SCHEMA subpartitions; + + + +/* Create two level partitioning structure */ +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +INSERT INTO subpartitions.abc SELECT i, i FROM generate_series(1, 200, 20) as i; +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); +SELECT create_hash_partitions('subpartitions.abc_1', 'a', 3); +SELECT create_hash_partitions('subpartitions.abc_2', 'b', 2); +SELECT * FROM pathman_partition_list; +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + +/* Insert should result in creation of new subpartition */ +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); +SELECT create_range_partitions('subpartitions.abc_3', 'b', 200, 10, 2); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; +INSERT INTO subpartitions.abc VALUES (215, 215); +SELECT * FROM pathman_partition_list WHERE parent = 'subpartitions.abc_3'::regclass; +SELECT tableoid::regclass, * FROM subpartitions.abc WHERE a = 215 AND b = 215 ORDER BY a, b; + +/* Pruning tests */ +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a < 150; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a = 215 AND b = 215; +EXPLAIN (COSTS OFF) SELECT * FROM subpartitions.abc WHERE a >= 210 AND b >= 210; + +CREATE OR REPLACE FUNCTION check_multilevel_queries() +RETURNS VOID AS +$$ +BEGIN + IF NOT EXISTS(SELECT * FROM (SELECT tableoid::regclass, * + FROM subpartitions.abc + WHERE a = 215 AND b = 215 + ORDER BY a, b) t1) + THEN + RAISE EXCEPTION 'should be at least one record in result'; + END IF; +END +$$ LANGUAGE plpgsql; +SELECT check_multilevel_queries(); +DROP FUNCTION check_multilevel_queries(); + +/* Multilevel partitioning with updates */ +CREATE OR REPLACE FUNCTION subpartitions.partitions_tree( + rel REGCLASS, + level TEXT DEFAULT ' ' 
+) +RETURNS SETOF TEXT AS +$$ +DECLARE + partition REGCLASS; + subpartition TEXT; +BEGIN + IF rel IS NULL THEN + RETURN; + END IF; + + RETURN NEXT rel::TEXT; + + FOR partition IN (SELECT l.partition FROM pathman_partition_list l WHERE parent = rel) + LOOP + FOR subpartition IN (SELECT subpartitions.partitions_tree(partition, level || ' ')) + LOOP + RETURN NEXT level || subpartition::TEXT; + END LOOP; + END LOOP; +END +$$ LANGUAGE plpgsql; + +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_4'); +SELECT create_hash_partitions('subpartitions.abc_4', 'b', 2); +SELECT subpartitions.partitions_tree('subpartitions.abc'); +DROP TABLE subpartitions.abc CASCADE; + + +/* Test that update works correctly */ +SET pg_pathman.enable_partitionrouter = ON; + +CREATE TABLE subpartitions.abc(a INTEGER NOT NULL, b INTEGER NOT NULL); +SELECT create_range_partitions('subpartitions.abc', 'a', 0, 100, 2); +SELECT create_range_partitions('subpartitions.abc_1', 'b', 0, 50, 2); /* 0 - 100 */ +SELECT create_range_partitions('subpartitions.abc_2', 'b', 0, 50, 2); /* 100 - 200 */ + +INSERT INTO subpartitions.abc SELECT 25, 25 FROM generate_series(1, 10); +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_1_1 */ + +UPDATE subpartitions.abc SET a = 125 WHERE a = 25 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_1 */ + +UPDATE subpartitions.abc SET b = 75 WHERE a = 125 and b = 25; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_2 */ + +UPDATE subpartitions.abc SET b = 125 WHERE a = 125 and b = 75; +SELECT tableoid::regclass, * FROM subpartitions.abc; /* subpartitions.abc_2_3 */ + + +/* split_range_partition */ +SELECT split_range_partition('subpartitions.abc_2', 150); /* FAIL */ +SELECT split_range_partition('subpartitions.abc_2_2', 75); /* OK */ +SELECT subpartitions.partitions_tree('subpartitions.abc'); + + +/* merge_range_partitions */ +TRUNCATE subpartitions.abc; +INSERT INTO 
subpartitions.abc VALUES (150, 0); + +SELECT append_range_partition('subpartitions.abc', 'subpartitions.abc_3'); /* 200 - 300 */ +INSERT INTO subpartitions.abc VALUES (250, 50); + +SELECT merge_range_partitions('subpartitions.abc_2', 'subpartitions.abc_3'); /* OK */ +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + +SELECT merge_range_partitions('subpartitions.abc_2_1', 'subpartitions.abc_2_2'); /* OK */ +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY a, b; + + +DROP TABLE subpartitions.abc CASCADE; + + +/* Check insert & update with dropped columns */ +CREATE TABLE subpartitions.abc(a int, b int, c int, id1 int not null, id2 int not null, val serial); +SELECT create_range_partitions('subpartitions.abc', 'id1', 0, 100, 2); +ALTER TABLE subpartitions.abc DROP COLUMN c; +SELECT prepend_range_partition('subpartitions.abc'); +ALTER TABLE subpartitions.abc DROP COLUMN b; +SELECT create_range_partitions('subpartitions.abc_3', 'id2', 0, 10, 3); +ALTER TABLE subpartitions.abc DROP COLUMN a; +SELECT prepend_range_partition('subpartitions.abc_3'); + +SELECT * FROM pathman_partition_list ORDER BY parent, partition; + +INSERT INTO subpartitions.abc VALUES (10, 0), (110, 0), (-1, 0), (-1, -1); +SELECT tableoid::regclass, * FROM subpartitions.abc ORDER BY id1, id2, val; + +SET pg_pathman.enable_partitionrouter = ON; +WITH updated AS (UPDATE subpartitions.abc SET id1 = -1, id2 = -1 RETURNING tableoid::regclass, *) +SELECT * FROM updated ORDER BY val ASC; + +DROP TABLE subpartitions.abc CASCADE; + + +--- basic check how rowmark plays along with subparts; PGPRO-2755 +CREATE TABLE subpartitions.a1(n1 integer); +CREATE TABLE subpartitions.a2(n1 integer not null, n2 integer not null); +SELECT create_range_partitions('subpartitions.a2', 'n1', 1, 2, 0); + +SELECT add_range_partition('subpartitions.a2', 10, 20, 'subpartitions.a2_1020'); +SELECT create_range_partitions('subpartitions.a2_1020'::regclass, 'n2'::text, array[30,40], 
array['subpartitions.a2_1020_3040']); +INSERT INTO subpartitions.a2 VALUES (10, 30), (11, 31), (12, 32), (19, 39); +INSERT INTO subpartitions.a1 VALUES (12), (19), (20); + +SELECT a2.* FROM subpartitions.a1 JOIN subpartitions.a2 ON a2.n1=a1.n1 FOR UPDATE; + +DROP TABLE subpartitions.a2 CASCADE; +DROP TABLE subpartitions.a1; + + +DROP FUNCTION subpartitions.partitions_tree(regclass, text); +DROP SCHEMA subpartitions; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_upd_del.sql b/sql/pathman_upd_del.sql new file mode 100644 index 00000000..c99b9666 --- /dev/null +++ b/sql/pathman_upd_del.sql @@ -0,0 +1,285 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output. Moreover, again since 12 (608b167f9f), CTEs which are + * scanned once are no longer an optimization fence, changing a good deal of + * plans here. There is an option to forcibly make them MATERIALIZED, but we + * also need to run tests on older versions, so put updated plans in + * pathman_upd_del_2.out instead. + * + * In Postgres Pro Standard/Enterprise 15+ the EXPLAIN output has changed so put + * the updated plan in pathman_upd_del_4.out. 
+ */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE SCHEMA pathman; +CREATE EXTENSION pg_pathman SCHEMA pathman; +CREATE SCHEMA test; + + + +SET enable_indexscan = ON; +SET enable_seqscan = OFF; + + +/* Temporary tables for JOINs */ +CREATE TABLE test.tmp (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp VALUES (1, 1), (2, 2); + +CREATE TABLE test.tmp2 (id INTEGER NOT NULL, value INTEGER NOT NULL); +INSERT INTO test.tmp2 SELECT i % 10 + 1, i FROM generate_series(1, 100) i; +SELECT pathman.create_range_partitions('test.tmp2', 'id', 1, 1, 10); + + +/* Partition table by RANGE */ +CREATE TABLE test.range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + value INTEGER); + +INSERT INTO test.range_rel (dt, value) SELECT g, extract(day from g) +FROM generate_series('2010-01-01'::date, '2010-12-31'::date, '1 day') AS g; + +SELECT pathman.create_range_partitions('test.range_rel', 'dt', + '2010-01-01'::date, '1 month'::interval, + 12); + + +VACUUM ANALYZE; + + +/* + * Test UPDATE and DELETE + */ + +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; +ROLLBACK; + + +/* have partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt = '2010-06-15'; + +BEGIN; +DELETE FROM test.range_rel WHERE dt = '2010-06-15'; +SELECT * FROM test.range_rel WHERE dt = '2010-06-15'; +ROLLBACK; + + +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) UPDATE test.range_rel SET value = 222 WHERE dt = '1990-01-01'; + +BEGIN; +UPDATE test.range_rel SET value = 111 WHERE dt = '1990-01-01'; +SELECT * FROM test.range_rel WHERE dt = '1990-01-01'; +ROLLBACK; + + +/* no partitions for this 'dt' */ +EXPLAIN (COSTS OFF) DELETE FROM test.range_rel WHERE dt < '1990-01-01'; + +BEGIN; +DELETE FROM test.range_rel WHERE dt < '1990-01-01'; +SELECT * 
FROM test.range_rel WHERE dt < '1990-01-01'; +ROLLBACK; + + +/* UPDATE + FROM, partitioned table */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; + +BEGIN; +UPDATE test.range_rel r SET value = t.value +FROM test.tmp t WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; + + +/* UPDATE + FROM, single table */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; + +BEGIN; +UPDATE test.tmp t SET value = r.value +FROM test.range_rel r WHERE r.dt = '2010-01-01' AND r.id = t.id; +ROLLBACK; + + +/* DELETE + USING, partitioned table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp t +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; + + +/* DELETE + USING, single table */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; + +BEGIN; +DELETE FROM test.tmp t USING test.range_rel r +WHERE r.dt = '2010-01-02' AND r.id = t.id; +ROLLBACK; + + +/* DELETE + USING, two partitioned tables */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM test.range_rel r USING test.tmp2 t +WHERE t.id = r.id; +ROLLBACK; + + +/* DELETE + USING, partitioned table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + +BEGIN; +DELETE FROM test.range_rel r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; + + +/* DELETE + USING, single table + two partitioned tables in subselect */ +EXPLAIN (COSTS OFF) +DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; + +BEGIN; 
+DELETE FROM test.tmp r +USING (SELECT * + FROM test.tmp2 a1 + JOIN test.tmp2 a2 + USING(id)) t +WHERE t.id = r.id; +ROLLBACK; + + +/* UPDATE + FROM, two partitioned tables */ +EXPLAIN (COSTS OFF) +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; + +BEGIN; +UPDATE test.range_rel r SET value = 1 FROM test.tmp2 t +WHERE t.id = r.id; +ROLLBACK; + + +/* + * UPDATE + subquery with partitioned table (PG 9.5). + * See pathman_rel_pathlist_hook() + RELOPT_OTHER_MEMBER_REL. + */ +EXPLAIN (COSTS OFF) +UPDATE test.tmp t SET value = 2 +WHERE t.id IN (SELECT id + FROM test.tmp2 t2 + WHERE id = t.id); + + +/* Test special rule for CTE; SELECT (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; + +BEGIN; +WITH q AS (SELECT * FROM test.range_rel r + WHERE r.dt = '2010-01-02') +DELETE FROM test.tmp USING q; +ROLLBACK; + + +/* Test special rule for CTE; DELETE (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; + +BEGIN; +WITH q AS (DELETE FROM test.range_rel r + WHERE r.dt = '2010-01-02' + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; + + +/* Test special rule for CTE; DELETE + USING (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; + +BEGIN; +WITH q AS (DELETE FROM test.tmp t + USING test.range_rel r + WHERE r.dt = '2010-01-02' AND r.id = t.id + RETURNING *) +DELETE FROM test.tmp USING q; +ROLLBACK; + + +/* Test special rule for CTE; Nested CTEs (PostgreSQL 9.5) */ +EXPLAIN (COSTS OFF) +WITH q AS (WITH n AS (SELECT id FROM test.tmp2 WHERE id = 2) + DELETE FROM test.tmp t + USING n + WHERE t.id = n.id + RETURNING *) +DELETE FROM test.tmp USING q; + + +/* Test special rule for CTE; CTE in quals (PostgreSQL 9.5) */ 
+EXPLAIN (COSTS OFF) +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); + +BEGIN; +WITH q AS (SELECT id FROM test.tmp2 + WHERE id < 3) +DELETE FROM test.tmp t WHERE t.id in (SELECT id FROM q); +ROLLBACK; + + + +DROP TABLE test.tmp CASCADE; +DROP TABLE test.tmp2 CASCADE; +DROP TABLE test.range_rel CASCADE; +DROP SCHEMA test; +DROP EXTENSION pg_pathman CASCADE; +DROP SCHEMA pathman; diff --git a/sql/pathman_update_node.sql b/sql/pathman_update_node.sql new file mode 100644 index 00000000..e70f60f4 --- /dev/null +++ b/sql/pathman_update_node.sql @@ -0,0 +1,220 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_node; + + +SET pg_pathman.enable_partitionrouter = ON; + + +/* Partition table by RANGE (NUMERIC) */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +CREATE INDEX val_idx ON test_update_node.test_range (val); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + +/* Moving from 2st to 1st partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 5 WHERE val = 15; + +/* Keep same partition */ +EXPLAIN (COSTS OFF) UPDATE test_update_node.test_range SET val = 14 WHERE val = 15; + +/* Update values in 1st partition (rows remain there) */ +UPDATE test_update_node.test_range SET val = 5 WHERE val <= 10; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val < 10 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Update values in 2nd partition (rows move to 3rd partition) */ +UPDATE test_update_node.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val > 20 AND val <= 30 +ORDER BY comment; + +SELECT count(*) FROM 
test_update_node.test_range; + + +/* Move single row */ +UPDATE test_update_node.test_range SET val = 90 WHERE val = 80; + +/* Check values #3 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 90 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Move single row (create new partition) */ +UPDATE test_update_node.test_range SET val = -1 WHERE val = 50; + +/* Check values #4 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = -1 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Update non-key column */ +UPDATE test_update_node.test_range SET comment = 'test!' WHERE val = 100; + +/* Check values #5 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 100 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Try moving row into a gap (ERROR) */ +DROP TABLE test_update_node.test_range_4; +UPDATE test_update_node.test_range SET val = 35 WHERE val = 70; + +/* Check values #6 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 70 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Test trivial move (same key) */ +UPDATE test_update_node.test_range SET val = 65 WHERE val = 65; + +/* Check values #7 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 65 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_range; + + +/* Test tuple conversion (attached partition) */ +CREATE TABLE test_update_node.test_range_inv(comment TEXT, val NUMERIC NOT NULL); +SELECT attach_range_partition('test_update_node.test_range', + 'test_update_node.test_range_inv', + 101::NUMERIC, 111::NUMERIC); +UPDATE test_update_node.test_range SET val = 105 WHERE val = 60; +UPDATE test_update_node.test_range SET val = 105 WHERE val = 105; + +/* Check values #8 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 105 
+ORDER BY comment; + +UPDATE test_update_node.test_range SET val = 60 WHERE val = 105; +SELECT count(*) FROM test_update_node.test_range; + +/* Test RETURNING */ +UPDATE test_update_node.test_range SET val = 71 WHERE val = 41 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 71 WHERE val = 71 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 106 WHERE val = 61 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 106 WHERE val = 106 RETURNING val, comment; +UPDATE test_update_node.test_range SET val = 61 WHERE val = 106 RETURNING val, comment; + +/* Just in case, check we don't duplicate anything */ +SELECT count(*) FROM test_update_node.test_range; + +/* Test tuple conversion (dropped column) */ +ALTER TABLE test_update_node.test_range DROP COLUMN comment CASCADE; +SELECT append_range_partition('test_update_node.test_range'); +UPDATE test_update_node.test_range SET val = 115 WHERE val = 55; +UPDATE test_update_node.test_range SET val = 115 WHERE val = 115; + +/* Check values #9 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_range +WHERE val = 115; + +UPDATE test_update_node.test_range SET val = 55 WHERE val = 115; +SELECT count(*) FROM test_update_node.test_range; + +DROP TABLE test_update_node.test_range CASCADE; + +/* recreate table and mass move */ +CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i; +SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10); + +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; +SELECT count(*) FROM test_update_node.test_range; + +/* move everything to next partition */ +UPDATE test_update_node.test_range SET val = val + 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; + 
+/* move everything to previous partition */ +UPDATE test_update_node.test_range SET val = val - 10; +SELECT tableoid::regclass, MIN(val) FROM test_update_node.test_range + GROUP BY tableoid::regclass ORDER BY tableoid::regclass; +SELECT count(*) FROM test_update_node.test_range; + +/* Partition table by HASH (INT4) */ +CREATE TABLE test_update_node.test_hash(val INT4 NOT NULL, comment TEXT); +INSERT INTO test_update_node.test_hash SELECT i, i FROM generate_series(1, 10) i; +SELECT create_hash_partitions('test_update_node.test_hash', 'val', 3); + + +/* Shuffle rows a few times */ +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; +UPDATE test_update_node.test_hash SET val = val + 1; + +/* Check values #0 */ +SELECT tableoid::regclass, * FROM test_update_node.test_hash ORDER BY val; + + +/* Move all rows into single partition */ +UPDATE test_update_node.test_hash SET val = 1; + +/* Check values #1 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_hash +WHERE val = 1 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_hash; + + +/* Don't move any rows */ +UPDATE test_update_node.test_hash SET val = 3 WHERE val = 2; + +/* Check values #2 */ +SELECT tableoid::REGCLASS, * +FROM test_update_node.test_hash +WHERE val = 3 +ORDER BY comment; + +SELECT count(*) FROM test_update_node.test_hash; + + + +DROP TABLE test_update_node.test_hash CASCADE; +DROP TABLE test_update_node.test_range CASCADE; +DROP SCHEMA test_update_node; +DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_trigger.sql b/sql/pathman_update_trigger.sql deleted file mode 100644 index a5f5b10e..00000000 
--- a/sql/pathman_update_trigger.sql +++ /dev/null @@ -1,164 +0,0 @@ -\set VERBOSITY terse - -SET search_path = 'public'; -CREATE EXTENSION pg_pathman; -CREATE SCHEMA test_update_trigger; - - - -/* Partition table by RANGE (NUMERIC) */ -CREATE TABLE test_update_trigger.test_range(val NUMERIC NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_range SELECT i, i FROM generate_series(1, 100) i; -SELECT create_range_partitions('test_update_trigger.test_range', 'val', 1, 10); -SELECT create_update_triggers('test_update_trigger.test_range'); - - -/* Update values in 1st partition (rows remain there) */ -UPDATE test_update_trigger.test_range SET val = 5 WHERE val <= 10; - -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val < 10 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Update values in 2nd partition (rows move to 3rd partition) */ -UPDATE test_update_trigger.test_range SET val = val + 10 WHERE val > 10 AND val <= 20; - -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val > 20 AND val <= 30 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Move single row */ -UPDATE test_update_trigger.test_range SET val = 90 WHERE val = 80; - -/* Check values #3 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 90 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Move single row (create new partition) */ -UPDATE test_update_trigger.test_range SET val = -1 WHERE val = 50; - -/* Check values #4 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = -1 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Update non-key column */ -UPDATE test_update_trigger.test_range SET comment = 'test!' 
WHERE val = 100; - -/* Check values #5 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 100 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Try moving row into a gap (ERROR) */ -DROP TABLE test_update_trigger.test_range_4; -UPDATE test_update_trigger.test_range SET val = 35 WHERE val = 70; - -/* Check values #6 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 70 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Test trivial move (same key) */ -UPDATE test_update_trigger.test_range SET val = 65 WHERE val = 65; - -/* Check values #7 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 65 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Test tuple conversion (attached partition) */ -CREATE TABLE test_update_trigger.test_range_inv(comment TEXT, val NUMERIC NOT NULL); -SELECT attach_range_partition('test_update_trigger.test_range', - 'test_update_trigger.test_range_inv', - 101::NUMERIC, 111::NUMERIC); -UPDATE test_update_trigger.test_range SET val = 105 WHERE val = 60; - -/* Check values #8 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 105 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_range; - - -/* Test tuple conversion (dropped column) */ -ALTER TABLE test_update_trigger.test_range DROP COLUMN comment CASCADE; -SELECT append_range_partition('test_update_trigger.test_range'); -UPDATE test_update_trigger.test_range SET val = 115 WHERE val = 55; - -/* Check values #9 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_range -WHERE val = 115; - -SELECT count(*) FROM test_update_trigger.test_range; - - - -/* Partition table by HASH (INT4) */ -CREATE TABLE test_update_trigger.test_hash(val INT4 NOT NULL, comment TEXT); -INSERT INTO test_update_trigger.test_hash SELECT i, i FROM generate_series(1, 10) i; 
-SELECT create_hash_partitions('test_update_trigger.test_hash', 'val', 3); -SELECT create_update_triggers('test_update_trigger.test_hash'); - - -/* Move all rows into single partition */ -UPDATE test_update_trigger.test_hash SET val = 1; - -/* Check values #1 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 1 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_hash; - - -/* Don't move any rows */ -UPDATE test_update_trigger.test_hash SET val = 3 WHERE val = 2; - -/* Check values #2 */ -SELECT tableoid::REGCLASS, * -FROM test_update_trigger.test_hash -WHERE val = 3 -ORDER BY comment; - -SELECT count(*) FROM test_update_trigger.test_hash; - - - -DROP SCHEMA test_update_trigger CASCADE; -DROP EXTENSION pg_pathman; diff --git a/sql/pathman_update_triggers.sql b/sql/pathman_update_triggers.sql new file mode 100644 index 00000000..646afe65 --- /dev/null +++ b/sql/pathman_update_triggers.sql @@ -0,0 +1,146 @@ +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA test_update_triggers; + + + +create table test_update_triggers.test (val int not null); +select create_hash_partitions('test_update_triggers.test', 'val', 2, + partition_names := array[ + 'test_update_triggers.test_1', + 'test_update_triggers.test_2']); + + +create or replace function test_update_triggers.test_trigger() returns trigger as $$ +begin + raise notice '%', format('%s %s %s (%s)', TG_WHEN, TG_OP, TG_LEVEL, TG_TABLE_NAME); + + if TG_OP::text = 'DELETE'::text then + return old; + else + return new; + end if; end; +$$ language plpgsql; + + +/* Enable our precious custom node */ +set pg_pathman.enable_partitionrouter = t; + + +/* + * Statement level triggers + */ + +create trigger bus before update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create 
trigger bis before insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); + + +create trigger aus after update ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test + execute procedure test_update_triggers.test_trigger (); + + +create trigger bus before update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); + +create trigger aus after update ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_1 + execute procedure test_update_triggers.test_trigger (); + + +create trigger bus before update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bds before delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger bis before insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); + +create trigger aus after update ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ads after delete ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); +create trigger ais after insert ON test_update_triggers.test_2 + execute procedure test_update_triggers.test_trigger (); + + +/* multiple values */ +insert into 
test_update_triggers.test select generate_series(1, 200); + +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; +update test_update_triggers.test set val = val + 1; + +select count(distinct val) from test_update_triggers.test; + + +truncate test_update_triggers.test; + + +/* + * Row level triggers + */ + +create trigger bu before update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); + +create trigger au after update ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON test_update_triggers.test_1 + for each row execute procedure test_update_triggers.test_trigger (); + + +create trigger bu before update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bd before delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger bi before insert ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); + +create trigger au after update ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ad after delete ON test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); +create trigger ai after insert ON 
test_update_triggers.test_2 + for each row execute procedure test_update_triggers.test_trigger (); + + +/* single value */ +insert into test_update_triggers.test values (1); + +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; +update test_update_triggers.test set val = val + 1 returning *, tableoid::regclass; + +select count(distinct val) from test_update_triggers.test; + + +DROP TABLE test_update_triggers.test CASCADE; +DROP FUNCTION test_update_triggers.test_trigger(); +DROP SCHEMA test_update_triggers; +DROP EXTENSION pg_pathman CASCADE; diff --git a/sql/pathman_utility_stmt.sql b/sql/pathman_utility_stmt.sql index 7dc9dd2f..08992835 100644 --- a/sql/pathman_utility_stmt.sql +++ b/sql/pathman_utility_stmt.sql @@ -1,7 +1,9 @@ \set VERBOSITY terse - +SET search_path = 'public'; CREATE EXTENSION pg_pathman; + + /* * Test COPY */ @@ -26,11 +28,6 @@ VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY TO */ -COPY copy_stmt_hooking.test TO stdout; -\copy copy_stmt_hooking.test to stdout (format csv) -\copy copy_stmt_hooking.test(comment) to stdout - /* DELETE ROWS, COPY FROM */ DELETE FROM copy_stmt_hooking.test; COPY copy_stmt_hooking.test FROM stdin; @@ -50,20 +47,21 @@ VACUUM FULL copy_stmt_hooking.test_2; VACUUM FULL copy_stmt_hooking.test_3; VACUUM FULL copy_stmt_hooking.test_4; -/* COPY FROM (specified columns) */ -COPY copy_stmt_hooking.test (val) TO stdout; -COPY copy_stmt_hooking.test (val, comment) TO stdout; -COPY copy_stmt_hooking.test (c3, val, comment) TO stdout; -COPY copy_stmt_hooking.test (val, comment, c3, c4) TO stdout; +/* COPY TO */ +COPY copy_stmt_hooking.test TO stdout; /* not ok */ +COPY 
copy_stmt_hooking.test (val) TO stdout; /* not ok */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout (FORMAT CSV); +\copy (SELECT * FROM copy_stmt_hooking.test) TO stdout -/* COPY TO (partition does not exist, NOT allowed to create partitions) */ +/* COPY FROM (partition does not exist, NOT allowed to create partitions) */ SET pg_pathman.enable_auto_partition = OFF; COPY copy_stmt_hooking.test FROM stdin; 21 test_no_part 0 0 \. SELECT * FROM copy_stmt_hooking.test WHERE val > 20; -/* COPY TO (partition does not exist, allowed to create partitions) */ +/* COPY FROM (partition does not exist, allowed to create partitions) */ SET pg_pathman.enable_auto_partition = ON; COPY copy_stmt_hooking.test FROM stdin; 21 test_no_part 0 0 @@ -96,16 +94,16 @@ WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test'::REGCLASS; SELECT count(*) FROM pg_attribute WHERE attnum > 0 AND attrelid = 'copy_stmt_hooking.test_6'::REGCLASS; +/* test transformed tuples */ +COPY (SELECT * FROM copy_stmt_hooking.test) TO stdout; -/* COPY FROM (test transformed tuples) */ -COPY copy_stmt_hooking.test (val, c3, c4) TO stdout; -/* COPY TO (insert into table with dropped column) */ +/* COPY FROM (insert into table with dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; 2 1 2 \. -/* COPY TO (insert into table without dropped column) */ +/* COPY FROM (insert into table without dropped column) */ COPY copy_stmt_hooking.test(val, c3, c4) FROM stdin; 27 1 2 \. @@ -156,7 +154,10 @@ COPY copy_stmt_hooking.test2(t) FROM stdin; \. 
SELECT COUNT(*) FROM copy_stmt_hooking.test2; -DROP SCHEMA copy_stmt_hooking CASCADE; +DROP TABLE copy_stmt_hooking.test CASCADE; +DROP TABLE copy_stmt_hooking.test2 CASCADE; +DROP SCHEMA copy_stmt_hooking; + /* @@ -164,6 +165,25 @@ DROP SCHEMA copy_stmt_hooking CASCADE; */ CREATE SCHEMA rename; + +/* + * Check that auto naming sequence is renamed + */ +CREATE TABLE rename.parent(id int not null); +SELECT create_range_partitions('rename.parent', 'id', 1, 2, 2); +SELECT 'rename.parent'::regclass; /* parent is OK */ +SELECT 'rename.parent_seq'::regclass; /* sequence is OK */ +ALTER TABLE rename.parent RENAME TO parent_renamed; +SELECT 'rename.parent_renamed'::regclass; /* parent is OK */ +SELECT 'rename.parent_renamed_seq'::regclass; /* sequence is OK */ +SELECT append_range_partition('rename.parent_renamed'); /* can append */ +DROP SEQUENCE rename.parent_renamed_seq; +ALTER TABLE rename.parent_renamed RENAME TO parent; +SELECT 'rename.parent'::regclass; /* parent is OK */ + +/* + * Check that partitioning constraints are renamed + */ CREATE TABLE rename.test(a serial, b int); SELECT create_hash_partitions('rename.test', 'a', 3); ALTER TABLE rename.test_0 RENAME TO test_one; @@ -198,7 +218,9 @@ SELECT r.conname, pg_get_constraintdef(r.oid, true) FROM pg_constraint r WHERE r.conrelid = 'rename.test_inh_one'::regclass AND r.contype = 'c'; -/* Check that plain tables are not affected too */ +/* + * Check that plain tables are not affected too + */ CREATE TABLE rename.plain_test(a serial, b int); ALTER TABLE rename.plain_test RENAME TO plain_test_renamed; SELECT add_constraint('rename.plain_test_renamed'); @@ -213,6 +235,75 @@ SELECT r.conname, pg_get_constraintdef(r.oid, true) FROM pg_constraint r WHERE r.conrelid = 'rename.plain_test'::regclass AND r.contype = 'c'; -DROP SCHEMA rename CASCADE; + +DROP TABLE rename.plain_test CASCADE; +DROP TABLE rename.test_inh CASCADE; +DROP TABLE rename.parent CASCADE; +DROP TABLE rename.test CASCADE; +DROP FUNCTION 
add_constraint(regclass); +DROP SCHEMA rename; + + + +/* + * Test DROP INDEX CONCURRENTLY (test snapshots) + */ +CREATE SCHEMA drop_index; + +CREATE TABLE drop_index.test (val INT4 NOT NULL); +CREATE INDEX ON drop_index.test (val); +SELECT create_hash_partitions('drop_index.test', 'val', 2); +DROP INDEX CONCURRENTLY drop_index.test_0_val_idx; + +DROP TABLE drop_index.test CASCADE; +DROP SCHEMA drop_index; + +/* + * Checking that ALTER TABLE IF EXISTS with loaded (and created) pg_pathman extension works the same as in vanilla + */ +CREATE SCHEMA test_nonexistance; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME TO other_table_name; +/* renaming existent tables already tested earlier (see rename.plain_test) */ + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table ADD COLUMN j INT4; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table ADD COLUMN j INT4; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table DROP COLUMN IF EXISTS i; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS i; +ALTER TABLE IF EXISTS test_nonexistance.existent_table DROP COLUMN IF EXISTS nonexistent_column; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table RENAME COLUMN i TO j; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME COLUMN i TO j; +SELECT attname FROM pg_attribute WHERE attnum > 0 AND attrelid = 'test_nonexistance.existent_table'::REGCLASS; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS 
test_nonexistance.nonexistent_table RENAME CONSTRAINT baz TO bar; +CREATE TABLE test_nonexistance.existent_table(i INT4 CONSTRAINT existent_table_i_check CHECK (i < 100)); +ALTER TABLE IF EXISTS test_nonexistance.existent_table RENAME CONSTRAINT existent_table_i_check TO existent_table_i_other_check; +DROP TABLE test_nonexistance.existent_table; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET SCHEMA nonexistent_schema; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA nonexistent_schema; +CREATE SCHEMA test_nonexistance2; +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET SCHEMA test_nonexistance2; +DROP TABLE test_nonexistance2.existent_table; +DROP SCHEMA test_nonexistance2; + +ALTER TABLE IF EXISTS test_nonexistance.nonexistent_table SET TABLESPACE nonexistent_tablespace; +CREATE TABLE test_nonexistance.existent_table(i INT4); +ALTER TABLE IF EXISTS test_nonexistance.existent_table SET TABLESPACE nonexistent_tablespace; +DROP TABLE test_nonexistance.existent_table; + +DROP SCHEMA test_nonexistance; + DROP EXTENSION pg_pathman; diff --git a/sql/pathman_views.sql b/sql/pathman_views.sql new file mode 100644 index 00000000..36baa5c5 --- /dev/null +++ b/sql/pathman_views.sql @@ -0,0 +1,86 @@ +/* + * ------------------------------------------- + * NOTE: This test behaves differenly on 9.5 + * ------------------------------------------- + * + * Also since 8edd0e794 (>= 12) Append nodes with single subplan are eliminated, + * causing different output; pathman_views_2.out is the updated version. 
+ */ + +\set VERBOSITY terse + +SET search_path = 'public'; +CREATE EXTENSION pg_pathman; +CREATE SCHEMA views; + + + +/* create a partitioned table */ +create table views._abc(id int4 not null); +select create_hash_partitions('views._abc', 'id', 10); +insert into views._abc select generate_series(1, 100); + +/* create a dummy table */ +create table views._abc_add (like views._abc); + + +vacuum analyze; + + +/* create a facade view */ +create view views.abc as select * from views._abc; + +create or replace function views.disable_modification() +returns trigger as +$$ +BEGIN + RAISE EXCEPTION '%', TG_OP; + RETURN NULL; +END; +$$ +language 'plpgsql'; + +create trigger abc_mod_tr +instead of insert or update or delete +on views.abc for each row +execute procedure views.disable_modification(); + + +/* Test SELECT */ +explain (costs off) select * from views.abc; +explain (costs off) select * from views.abc where id = 1; +explain (costs off) select * from views.abc where id = 1 for update; +select * from views.abc where id = 1 for update; +select count (*) from views.abc; + + +/* Test INSERT */ +explain (costs off) insert into views.abc values (1); +insert into views.abc values (1); + + +/* Test UPDATE */ +explain (costs off) update views.abc set id = 2 where id = 1 or id = 2; +update views.abc set id = 2 where id = 1 or id = 2; + + +/* Test DELETE */ +explain (costs off) delete from views.abc where id = 1 or id = 2; +delete from views.abc where id = 1 or id = 2; + + +/* Test SELECT with UNION */ +create view views.abc_union as table views._abc union table views._abc_add; +create view views.abc_union_all as table views._abc union all table views._abc_add; +explain (costs off) table views.abc_union; +explain (costs off) select * from views.abc_union where id = 5; +explain (costs off) table views.abc_union_all; +explain (costs off) select * from views.abc_union_all where id = 5; + + + +DROP TABLE views._abc CASCADE; +DROP TABLE views._abc_add CASCADE; +DROP FUNCTION 
views.disable_modification(); +DROP SCHEMA views; +DROP EXTENSION pg_pathman; diff --git a/src/compat/expand_rte_hook.c b/src/compat/expand_rte_hook.c deleted file mode 100644 index 94c866b3..00000000 --- a/src/compat/expand_rte_hook.c +++ /dev/null @@ -1,59 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * expand_rte_hook.c - * Fix rowmarks etc using the 'expand_inherited_rtentry_hook' - * NOTE: this hook exists in PostgresPro - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#include "compat/expand_rte_hook.h" -#include "relation_info.h" -#include "init.h" - -#include "postgres.h" -#include "optimizer/prep.h" - - -#ifdef NATIVE_EXPAND_RTE_HOOK - -static expand_inherited_rtentry_hook_type expand_inherited_rtentry_hook_next = NULL; - -static void pathman_expand_inherited_rtentry_hook(PlannerInfo *root, - RangeTblEntry *rte, - Index rti); - - -/* Initialize 'expand_inherited_rtentry_hook' */ -void -init_expand_rte_hook(void) -{ - expand_inherited_rtentry_hook_next = expand_inherited_rtentry_hook; - expand_inherited_rtentry_hook = pathman_expand_inherited_rtentry_hook; -} - - -/* Fix parent's RowMark (makes 'rowmarks_fix' pointless) */ -static void -pathman_expand_inherited_rtentry_hook(PlannerInfo *root, - RangeTblEntry *rte, - Index rti) -{ - PlanRowMark *oldrc; - - if (!IsPathmanReady()) - return; - - /* Check that table is partitioned by pg_pathman */ - if (!get_pathman_relation_info(rte->relid)) - return; - - /* HACK: fix rowmark for parent (for preprocess_targetlist() etc) */ - oldrc = get_plan_rowmark(root->rowMarks, rti); - if (oldrc) - oldrc->isParent = true; -} - -#endif /* NATIVE_EXPAND_RTE_HOOK */ diff --git a/src/compat/pg_compat.c b/src/compat/pg_compat.c index e9792b3c..216fd382 100644 --- a/src/compat/pg_compat.c +++ b/src/compat/pg_compat.c @@ -24,6 +24,7 @@ #include "optimizer/prep.h" #include "parser/parse_utilcmd.h" 
#include "port.h" +#include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -47,7 +48,8 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) { int parallel_workers; - parallel_workers = compute_parallel_worker(rel, rel->pages, -1); + /* no more than max_parallel_workers_per_gather since 11 */ + parallel_workers = compute_parallel_worker_compat(rel, rel->pages, -1); /* If any limit was set to zero, the user doesn't want a parallel scan. */ if (parallel_workers <= 0) @@ -116,17 +118,6 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) #endif -/* - * ExecEvalExpr - * - * global variables for macro wrapper evaluation - */ -#if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 100000 -Datum exprResult; -ExprDoneCond isDone; -#endif - - /* * get_all_actual_clauses */ @@ -154,8 +145,13 @@ get_all_actual_clauses(List *restrictinfo_list) * make_restrictinfos_from_actual_clauses */ #if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#include "optimizer/restrictinfo.h" +#else #include "optimizer/restrictinfo.h" #include "optimizer/var.h" +#endif /* 12 */ List * make_restrictinfos_from_actual_clauses(PlannerInfo *root, @@ -185,7 +181,9 @@ make_restrictinfos_from_actual_clauses(PlannerInfo *root, root->hasPseudoConstantQuals = true; } - rinfo = make_restrictinfo(clause, + rinfo = make_restrictinfo_compat( + root, + clause, true, false, pseudoconstant, @@ -236,10 +234,16 @@ McxtStatsInternal(MemoryContext context, int level, MemoryContextCounters local_totals; MemoryContext child; - AssertArg(MemoryContextIsValid(context)); + Assert(MemoryContextIsValid(context)); /* Examine the context itself */ +#if PG_VERSION_NUM >= 140000 + (*context->methods->stats) (context, NULL, NULL, totals, true); +#elif PG_VERSION_NUM >= 110000 + (*context->methods->stats) (context, NULL, NULL, totals); +#else (*context->methods->stats) (context, level, false, totals); +#endif memset(&local_totals, 0, 
sizeof(local_totals)); @@ -467,6 +471,13 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, */ return; #endif + +#if PG_VERSION_NUM >= 120000 + case RTE_RESULT: + /* RESULT RTEs, in themselves, are no problem. */ + break; +#endif /* 12 */ + } /* @@ -519,7 +530,71 @@ get_rel_persistence(Oid relid) } #endif +#if (PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ + (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) +/* + * Return a palloc'd bare attribute map for tuple conversion, matching input + * and output columns by name. (Dropped columns are ignored in both input and + * output.) This is normally a subroutine for convert_tuples_by_name, but can + * be used standalone. + */ +AttrNumber * +convert_tuples_by_name_map(TupleDesc indesc, + TupleDesc outdesc, + const char *msg) +{ + AttrNumber *attrMap; + int n; + int i; + + n = outdesc->natts; + attrMap = (AttrNumber *) palloc0(n * sizeof(AttrNumber)); + for (i = 0; i < n; i++) + { + Form_pg_attribute att = TupleDescAttr(outdesc, i); + char *attname; + Oid atttypid; + int32 atttypmod; + int j; + + if (att->attisdropped) + continue; /* attrMap[i] is already 0 */ + attname = NameStr(att->attname); + atttypid = att->atttypid; + atttypmod = att->atttypmod; + for (j = 0; j < indesc->natts; j++) + { + att = TupleDescAttr(indesc, j); + if (att->attisdropped) + continue; + if (strcmp(attname, NameStr(att->attname)) == 0) + { + /* Found it, check type */ + if (atttypid != att->atttypid || atttypmod != att->atttypmod) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg_internal("%s", _(msg)), + errdetail("Attribute \"%s\" of type %s does not match corresponding attribute of type %s.", + attname, + format_type_be(outdesc->tdtypeid), + format_type_be(indesc->tdtypeid)))); + attrMap[i] = (AttrNumber) (j + 1); + break; + } + } + if (attrMap[i] == 0) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg_internal("%s", _(msg)), + errdetail("Attribute \"%s\" of type %s does not exist in 
type %s.", + attname, + format_type_be(outdesc->tdtypeid), + format_type_be(indesc->tdtypeid)))); + } + return attrMap; +} +#endif /* * ------------- @@ -553,8 +628,7 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) /* * Accumulate size information from each live child. */ - Assert(childrel->rows > 0); - + Assert(childrel->rows >= 0); parent_rows += childrel->rows; #if PG_VERSION_NUM >= 90600 @@ -567,6 +641,9 @@ set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti) /* Set 'rows' for append relation */ rel->rows = parent_rows; + if (parent_rows == 0) + parent_rows = 1; + #if PG_VERSION_NUM >= 90600 rel->reltarget->width = rint(parent_size / parent_rows); #else diff --git a/src/compat/relation_tags.c b/src/compat/relation_tags.c deleted file mode 100644 index 383dd1f5..00000000 --- a/src/compat/relation_tags.c +++ /dev/null @@ -1,251 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * relation_tags.c - * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry - * NOTE: implementations for vanilla and PostgresPro differ - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#include "compat/relation_tags.h" -#include "planner_tree_modification.h" - -#include "nodes/nodes.h" - - -#ifndef NATIVE_RELATION_TAGS - -/* - * This table is used to ensure that partitioned relation - * cant't be referenced as ONLY and non-ONLY at the same time. - */ -static HTAB *per_table_relation_tags = NULL; - -/* - * Single row of 'per_table_relation_tags'. - * NOTE: do not reorder these fields. - */ -typedef struct -{ - Oid relid; /* key (part #1) */ - uint32 queryId; /* key (part #2) */ - List *relation_tags; -} relation_tags_entry; - -#endif - -/* Also used in get_refcount_relation_tags() etc... 
*/ -static int per_table_relation_tags_refcount = 0; - - - -/* Look through RTE's relation tags */ -List * -rte_fetch_tag(const uint32 query_id, - const RangeTblEntry *rte, - const char *key) -{ -#ifdef NATIVE_RELATION_TAGS - - return relation_tags_search(rte->custom_tags, key); - -#else - - relation_tags_entry *htab_entry, - htab_key = { rte->relid, query_id, NIL /* unused */ }; - - /* Skip if table is not initialized */ - if (per_table_relation_tags) - { - /* Search by 'htab_key' */ - htab_entry = hash_search(per_table_relation_tags, - &htab_key, HASH_FIND, NULL); - - if (htab_entry) - return relation_tags_search(htab_entry->relation_tags, key); - } - - /* Not found, return stub value */ - return NIL; - -#endif -} - -/* Attach new relation tag to RTE. Returns KVP with duplicate key. */ -List * -rte_attach_tag(const uint32 query_id, - RangeTblEntry *rte, - List *key_value_pair) -{ - /* Common variables */ - MemoryContext old_mcxt; - const char *current_key; - List *existing_kvp, - *temp_tags; /* rte->custom_tags OR - htab_entry->relation_tags */ - -#ifdef NATIVE_RELATION_TAGS - - /* Load relation tags to 'temp_tags' */ - temp_tags = rte->custom_tags; - -#else - - relation_tags_entry *htab_entry, - htab_key = { rte->relid, query_id, NIL /* unused */ }; - bool found; - - /* We prefer to initialize this table lazily */ - if (!per_table_relation_tags) - { - const long start_elems = 50; - HASHCTL hashctl; - - memset(&hashctl, 0, sizeof(HASHCTL)); - hashctl.entrysize = sizeof(relation_tags_entry); - hashctl.keysize = offsetof(relation_tags_entry, relation_tags); - hashctl.hcxt = RELATION_TAG_MCXT; - - per_table_relation_tags = hash_create("Custom tags for RangeTblEntry", - start_elems, &hashctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - } - - /* Search by 'htab_key' */ - htab_entry = hash_search(per_table_relation_tags, - &htab_key, HASH_ENTER, &found); - - /* Don't forget to initialize list! 
*/ - if (!found) - htab_entry->relation_tags = NIL; - - /* Load relation tags to 'temp_tags' */ - temp_tags = htab_entry->relation_tags; - -#endif - - /* Check that 'key_value_pair' is valid */ - AssertArg(key_value_pair && list_length(key_value_pair) == 2); - - /* Extract key of this KVP */ - rte_deconstruct_tag(key_value_pair, ¤t_key, NULL); - - /* Check if KVP with such key already exists */ - existing_kvp = relation_tags_search(temp_tags, current_key); - if (existing_kvp) - return existing_kvp; /* return KVP with duplicate key */ - - /* Add this KVP to relation tags list */ - old_mcxt = MemoryContextSwitchTo(RELATION_TAG_MCXT); - temp_tags = lappend(temp_tags, key_value_pair); - MemoryContextSwitchTo(old_mcxt); - -/* Finally store 'temp_tags' to relation tags list */ -#ifdef NATIVE_RELATION_TAGS - rte->custom_tags = temp_tags; -#else - htab_entry->relation_tags = temp_tags; -#endif - - /* Success! */ - return NIL; -} - - - -/* Extract key & value from 'key_value_pair' */ -void -rte_deconstruct_tag(const List *key_value_pair, - const char **key, /* ret value #1 */ - const Value **value) /* ret value #2 */ -{ - const char *r_key; - const Value *r_value; - - AssertArg(key_value_pair && list_length(key_value_pair) == 2); - - r_key = (const char *) strVal(linitial(key_value_pair)); - r_value = (const Value *) lsecond(key_value_pair); - - /* Check that 'key' is valid */ - Assert(IsA(linitial(key_value_pair), String)); - - /* Check that 'value' is valid or NULL */ - Assert(r_value == NULL || - IsA(r_value, Integer) || - IsA(r_value, Float) || - IsA(r_value, String)); - - /* Finally return key & value */ - if (key) *key = r_key; - if (value) *value = r_value; -} - -/* Search through list of 'relation_tags' */ -List * -relation_tags_search(List *relation_tags, const char *key) -{ - ListCell *lc; - - AssertArg(key); - - /* Scan KVP list */ - foreach (lc, relation_tags) - { - List *current_kvp = (List *) lfirst(lc); - const char *current_key; - - /* Extract key of this 
KVP */ - rte_deconstruct_tag(current_kvp, ¤t_key, NULL); - - /* Check if this is the KVP we're looking for */ - if (strcmp(key, current_key) == 0) - return current_kvp; - } - - /* Nothing! */ - return NIL; -} - - - -/* Increate usage counter by 1 */ -void -incr_refcount_relation_tags(void) -{ - /* Increment reference counter */ - if (++per_table_relation_tags_refcount <= 0) - elog(WARNING, "imbalanced %s", - CppAsString(incr_refcount_relation_tags)); -} - -/* Return current value of usage counter */ -uint32 -get_refcount_relation_tags(void) -{ - /* incr_refcount_parenthood_statuses() is called by pathman_planner_hook() */ - return per_table_relation_tags_refcount; -} - -/* Reset all cached statuses if needed (query end) */ -void -decr_refcount_relation_tags(void) -{ - /* Decrement reference counter */ - if (--per_table_relation_tags_refcount < 0) - elog(WARNING, "imbalanced %s", - CppAsString(decr_refcount_relation_tags)); - - /* Free resources if no one is using them */ - if (per_table_relation_tags_refcount == 0) - { - reset_query_id_generator(); - -#ifndef NATIVE_RELATION_TAGS - hash_destroy(per_table_relation_tags); - per_table_relation_tags = NULL; -#endif - } -} diff --git a/src/compat/rowmarks_fix.c b/src/compat/rowmarks_fix.c index 21259e66..35eea44b 100644 --- a/src/compat/rowmarks_fix.c +++ b/src/compat/rowmarks_fix.c @@ -14,165 +14,41 @@ #include "access/sysattr.h" #include "catalog/pg_type.h" -#include "nodes/relation.h" #include "nodes/nodeFuncs.h" +#include "optimizer/planmain.h" #include "utils/builtins.h" #include "utils/rel.h" -#ifndef NATIVE_PARTITIONING_ROWMARKS +#if PG_VERSION_NUM >= 90600 -/* Special column name for rowmarks */ -#define TABLEOID_STR(subst) ( "pathman_tableoid" subst ) -#define TABLEOID_STR_BASE_LEN ( sizeof(TABLEOID_STR("")) - 1 ) - -static void lock_rows_visitor(Plan *plan, void *context); -static List *get_tableoids_list(List *tlist); - - -/* Final rowmark processing for partitioned tables */ +/* Add missing "tableoid" column 
for partitioned table */ void -postprocess_lock_rows(List *rtable, Plan *plan) +append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc) { - plan_tree_walker(plan, lock_rows_visitor, rtable); -} + Var *var; + char resname[32]; + TargetEntry *tle; -/* - * Add missing 'TABLEOID_STR%u' junk attributes for inherited partitions - * - * This is necessary since preprocess_targetlist() heavily - * depends on the 'inh' flag which we have to unset. - * - * postprocess_lock_rows() will later transform 'TABLEOID_STR:Oid' - * relnames into 'tableoid:rowmarkId'. - */ -void -rowmark_add_tableoids(Query *parse) -{ - ListCell *lc; + var = makeVar(rc->rti, + TableOidAttributeNumber, + OIDOID, + -1, + InvalidOid, + 0); - /* Generate 'tableoid' for partitioned table rowmark */ - foreach (lc, parse->rowMarks) - { - RowMarkClause *rc = (RowMarkClause *) lfirst(lc); - Oid parent = getrelid(rc->rti, parse->rtable); - Var *var; - TargetEntry *tle; - char resname[64]; + snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); - /* Check that table is partitioned */ - if (!get_pathman_relation_info(parent)) - continue; + tle = makeTargetEntry((Expr *) var, + list_length(root->processed_tlist) + 1, + pstrdup(resname), + true); - var = makeVar(rc->rti, - TableOidAttributeNumber, - OIDOID, - -1, - InvalidOid, - 0); + root->processed_tlist = lappend(root->processed_tlist, tle); - /* Use parent's Oid as TABLEOID_STR's key (%u) */ - snprintf(resname, sizeof(resname), TABLEOID_STR("%u"), parent); - - tle = makeTargetEntry((Expr *) var, - list_length(parse->targetList) + 1, - pstrdup(resname), - true); - - /* There's no problem here since new attribute is junk */ - parse->targetList = lappend(parse->targetList, tle); - } + add_vars_to_targetlist_compat(root, list_make1(var), bms_make_singleton(0)); } -/* - * Extract target entries with resnames beginning with TABLEOID_STR - * and var->varoattno == TableOidAttributeNumber - */ -static List * -get_tableoids_list(List *tlist) -{ - List 
*result = NIL; - ListCell *lc; - - foreach (lc, tlist) - { - TargetEntry *te = (TargetEntry *) lfirst(lc); - Var *var = (Var *) te->expr; - - if (!IsA(var, Var)) - continue; - - /* Check that column name begins with TABLEOID_STR & it's tableoid */ - if (var->varoattno == TableOidAttributeNumber && - (te->resname && strlen(te->resname) > TABLEOID_STR_BASE_LEN) && - 0 == strncmp(te->resname, TABLEOID_STR(""), TABLEOID_STR_BASE_LEN)) - { - result = lappend(result, te); - } - } - - return result; -} - -/* - * Find 'TABLEOID_STR%u' attributes that were manually - * created for partitioned tables and replace Oids - * (used for '%u') with expected rc->rowmarkIds - */ -static void -lock_rows_visitor(Plan *plan, void *context) -{ - List *rtable = (List *) context; - LockRows *lock_rows = (LockRows *) plan; - Plan *lock_child = outerPlan(plan); - List *tableoids; - ListCell *lc; - - if (!IsA(lock_rows, LockRows)) - return; - - Assert(rtable && IsA(rtable, List) && lock_child); - - /* Select tableoid attributes that must be renamed */ - tableoids = get_tableoids_list(lock_child->targetlist); - if (!tableoids) - return; /* this LockRows has nothing to do with partitioned table */ - - foreach (lc, lock_rows->rowMarks) - { - PlanRowMark *rc = (PlanRowMark *) lfirst(lc); - Oid parent_oid = getrelid(rc->rti, rtable); - ListCell *mark_lc; - List *finished_tes = NIL; /* postprocessed target entries */ - - foreach (mark_lc, tableoids) - { - TargetEntry *te = (TargetEntry *) lfirst(mark_lc); - const char *cur_oid_str = &(te->resname[TABLEOID_STR_BASE_LEN]); - Datum cur_oid_datum; - - cur_oid_datum = DirectFunctionCall1(oidin, CStringGetDatum(cur_oid_str)); - - if (DatumGetObjectId(cur_oid_datum) == parent_oid) - { - char resname[64]; - - /* Replace 'TABLEOID_STR:Oid' with 'tableoid:rowmarkId' */ - snprintf(resname, sizeof(resname), "tableoid%u", rc->rowmarkId); - te->resname = pstrdup(resname); - - finished_tes = lappend(finished_tes, te); - } - } - - /* Remove target entries that 
have been processed in this step */ - foreach (mark_lc, finished_tes) - tableoids = list_delete_ptr(tableoids, lfirst(mark_lc)); - - if (list_length(tableoids) == 0) - break; /* nothing to do */ - } -} -#endif /* NATIVE_PARTITIONING_ROWMARKS */ +#endif diff --git a/src/debug_print.c b/src/debug_print.c index 36016861..bac1d622 100644 --- a/src/debug_print.c +++ b/src/debug_print.c @@ -8,12 +8,17 @@ * ------------------------------------------------------------------------ */ +#include #include "rangeset.h" #include "postgres.h" +#include "fmgr.h" +#include "executor/tuptable.h" #include "nodes/bitmapset.h" +#include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "lib/stringinfo.h" +#include "utils/lsyscache.h" /* diff --git a/src/declarative.c b/src/declarative.c new file mode 100644 index 00000000..42e9ffac --- /dev/null +++ b/src/declarative.c @@ -0,0 +1,382 @@ +#include "pathman.h" +#include "declarative.h" +#include "utils.h" +#include "partition_creation.h" + +#include "access/htup_details.h" +#include "catalog/namespace.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "fmgr.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "optimizer/planner.h" +#include "parser/parse_coerce.h" +#include "parser/parse_func.h" +#include "utils/builtins.h" +#include "utils/int8.h" +#include "utils/int8.h" +#include "utils/lsyscache.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "utils/varbit.h" + +/* + * Modifies query of declarative partitioning commands, + * There is a little hack here, ATTACH PARTITION command + * expects relation with REL_PARTITIONED_TABLE relkind. 
+ * To avoid this check we negate subtype, and then after the checks + * we set it back (look `is_pathman_related_partitioning_cmd`) + */ +void +modify_declarative_partitioning_query(Query *query) +{ + if (query->commandType != CMD_UTILITY) + return; + + if (IsA(query->utilityStmt, AlterTableStmt)) + { + PartRelationInfo *prel; + ListCell *lcmd; + Oid relid; + + AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; + relid = RangeVarGetRelid(stmt->relation, NoLock, true); + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + close_pathman_relation_info(prel); + + foreach(lcmd, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + switch (cmd->subtype) + { + case AT_AttachPartition: + case AT_DetachPartition: + cmd->subtype = -cmd->subtype; + break; + default: + break; + } + } + } + } +} + +/* is it one of declarative partitioning commands? */ +bool +is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid) +{ + PartRelationInfo *prel; + + if (IsA(parsetree, AlterTableStmt)) + { + ListCell *lc; + AlterTableStmt *stmt = (AlterTableStmt *) parsetree; + int cnt = 0; + + *parent_relid = RangeVarGetRelid(stmt->relation, NoLock, stmt->missing_ok); + + if (stmt->missing_ok && *parent_relid == InvalidOid) + return false; + + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) + return false; + + close_pathman_relation_info(prel); + + /* + * Since cmds can contain multiple commmands but we can handle only + * two of them here, so we need to check that there are only commands + * we can handle. In case if cmds contain other commands we skip all + * commands in this statement. 
+ */ + foreach(lc, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); + switch (abs(cmd->subtype)) + { + case AT_AttachPartition: + case AT_DetachPartition: + /* + * We need to fix all subtypes, + * possibly we're not going to handle this + */ + cmd->subtype = abs(cmd->subtype); + continue; + default: + cnt++; + } + } + + return (cnt == 0); + } + else if (IsA(parsetree, CreateStmt)) + { + /* inhRelations != NULL, partbound != NULL, tableElts == NULL */ + CreateStmt *stmt = (CreateStmt *) parsetree; + + if (stmt->inhRelations && stmt->partbound != NULL) + { + RangeVar *rv = castNode(RangeVar, linitial(stmt->inhRelations)); + *parent_relid = RangeVarGetRelid(rv, NoLock, false); + if ((prel = get_pathman_relation_info(*parent_relid)) == NULL) + return false; + + close_pathman_relation_info(prel); + if (stmt->tableElts != NIL) + elog(ERROR, "pg_pathman doesn't support column definitions " + "in declarative syntax yet"); + + return true; + + } + } + return false; +} + +static FuncExpr * +make_fn_expr(Oid funcOid, List *args) +{ + FuncExpr *fn_expr; + HeapTuple procTup; + Form_pg_proc procStruct; + + procTup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcOid)); + if (!HeapTupleIsValid(procTup)) + elog(ERROR, "cache lookup failed for function %u", funcOid); + procStruct = (Form_pg_proc) GETSTRUCT(procTup); + + fn_expr = makeFuncExpr(funcOid, procStruct->prorettype, args, + InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL); + ReleaseSysCache(procTup); + return fn_expr; +} + +/* + * Transform one constant in a partition bound spec + */ +static Const * +transform_bound_value(ParseState *pstate, A_Const *con, + Oid colType, int32 colTypmod) +{ + Node *value; + + /* Make it into a Const */ + value = (Node *) make_const(pstate, &con->val, con->location); + + /* Coerce to correct type */ + value = coerce_to_target_type(pstate, + value, exprType(value), + colType, + colTypmod, + COERCION_ASSIGNMENT, + COERCE_IMPLICIT_CAST, + -1); + + if (value == NULL) + 
ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("specified value cannot be cast to type %s", + format_type_be(colType)), + parser_errposition(pstate, con->location))); + + /* Simplify the expression, in case we had a coercion */ + if (!IsA(value, Const)) + value = (Node *) expression_planner((Expr *) value); + + /* Fail if we don't have a constant (i.e., non-immutable coercion) */ + if (!IsA(value, Const)) + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("specified value cannot be cast to type %s", + format_type_be(colType)), + errdetail("The cast requires a non-immutable conversion."), + errhint("Try putting the literal value in single quotes."), + parser_errposition(pstate, con->location))); + + return (Const *) value; +} + +/* handle ALTER TABLE .. ATTACH PARTITION command */ +void +handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd) +{ + Oid partition_relid, + proc_args[] = { REGCLASSOID, REGCLASSOID, + ANYELEMENTOID, ANYELEMENTOID }; + + List *proc_name; + FmgrInfo proc_flinfo; + FunctionCallInfoData proc_fcinfo; + char *pathman_schema; + PartitionRangeDatum *ldatum, + *rdatum; + Const *lval, + *rval; + A_Const *con; + List *fn_args; + ParseState *pstate = make_parsestate(NULL); + PartRelationInfo *prel; + + PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + + /* in 10beta1, PartitionCmd->bound is (Node *) */ + PartitionBoundSpec *bound = (PartitionBoundSpec *) pcmd->bound; + + Assert(cmd->subtype == AT_AttachPartition); + + if (bound->strategy != PARTITION_STRATEGY_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_pathman only supports queries for range partitions"))); + + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) + elog(ERROR, "relation is not partitioned"); + + partition_relid = RangeVarGetRelid(pcmd->name, NoLock, false); + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, 
"pg_pathman schema not initialized"); + + /* Build function's name */ + proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(attach_range_partition))); + + if ((!list_length(bound->lowerdatums)) || + (!list_length(bound->upperdatums))) + elog(ERROR, "provide start and end value for range partition"); + + ldatum = (PartitionRangeDatum *) linitial(bound->lowerdatums); + con = castNode(A_Const, ldatum->value); + lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); + con = castNode(A_Const, rdatum->value); + rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + close_pathman_relation_info(prel); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(proc_name, 4, proc_args, false), &proc_flinfo); + + InitFunctionCallInfoData(proc_fcinfo, &proc_flinfo, + 4, InvalidOid, NULL, NULL); + proc_fcinfo.arg[0] = ObjectIdGetDatum(parent_relid); + proc_fcinfo.argnull[0] = false; + proc_fcinfo.arg[1] = ObjectIdGetDatum(partition_relid); + proc_fcinfo.argnull[1] = false; + + /* Make function expression, we will need it to determine argument types */ + fn_args = list_make4(NULL, NULL, lval, rval); + proc_fcinfo.flinfo->fn_expr = + (Node *) make_fn_expr(proc_fcinfo.flinfo->fn_oid, fn_args); + + proc_fcinfo.arg[2] = lval->constvalue; + proc_fcinfo.argnull[2] = lval->constisnull; + proc_fcinfo.arg[3] = rval->constvalue; + proc_fcinfo.argnull[3] = rval->constisnull; + + /* Invoke the callback */ + FunctionCallInvoke(&proc_fcinfo); +} + +/* handle ALTER TABLE .. 
DETACH PARTITION command */ +void +handle_detach_partition(AlterTableCmd *cmd) +{ + List *proc_name; + FmgrInfo proc_flinfo; + FunctionCallInfoData proc_fcinfo; + char *pathman_schema; + Oid partition_relid, + args = REGCLASSOID; + PartitionCmd *pcmd = (PartitionCmd *) cmd->def; + + Assert(cmd->subtype == AT_DetachPartition); + partition_relid = RangeVarGetRelid(pcmd->name, NoLock, false); + + /* Fetch pg_pathman's schema */ + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + + /* Build function's name */ + proc_name = list_make2(makeString(pathman_schema), + makeString(CppAsString(detach_range_partition))); + + /* Lookup function's Oid and get FmgrInfo */ + fmgr_info(LookupFuncName(proc_name, 1, &args, false), &proc_flinfo); + + InitFunctionCallInfoData(proc_fcinfo, &proc_flinfo, + 4, InvalidOid, NULL, NULL); + proc_fcinfo.arg[0] = ObjectIdGetDatum(partition_relid); + proc_fcinfo.argnull[0] = false; + + /* Invoke the callback */ + FunctionCallInvoke(&proc_fcinfo); +} + +/* handle CREATE TABLE .. PARTITION OF FOR VALUES FROM .. TO .. 
*/ +void +handle_create_partition_of(Oid parent_relid, CreateStmt *stmt) +{ + Bound start, + end; + PartRelationInfo *prel; + ParseState *pstate = make_parsestate(NULL); + PartitionRangeDatum *ldatum, + *rdatum; + Const *lval, + *rval; + A_Const *con; + + /* in 10beta1, PartitionCmd->bound is (Node *) */ + PartitionBoundSpec *bound = (PartitionBoundSpec *) stmt->partbound; + + /* we show errors earlier for these asserts */ + Assert(stmt->inhRelations != NULL); + Assert(stmt->tableElts == NIL); + + if (bound->strategy != PARTITION_STRATEGY_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_pathman only supports queries for range partitions"))); + + if ((prel = get_pathman_relation_info(parent_relid)) == NULL) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)))); + + if (prel->parttype != PT_RANGE) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned by RANGE", + get_rel_name_or_relid(parent_relid)))); + + ldatum = (PartitionRangeDatum *) linitial(bound->lowerdatums); + con = castNode(A_Const, ldatum->value); + lval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + + rdatum = (PartitionRangeDatum *) linitial(bound->upperdatums); + con = castNode(A_Const, rdatum->value); + rval = transform_bound_value(pstate, con, prel->ev_type, prel->ev_typmod); + close_pathman_relation_info(prel); + + start = lval->constisnull? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(lval->constvalue); + + end = rval->constisnull? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(rval->constvalue); + + /* more checks */ + check_range_available(parent_relid, &start, &end, lval->consttype, true); + + /* Create a new RANGE partition and return its Oid */ + create_single_range_partition_internal(parent_relid, + &start, + &end, + lval->consttype, + stmt->relation, + stmt->tablespacename); +} diff --git a/src/hooks.c b/src/hooks.c index 92314c7b..2ff2667c 100644 --- a/src/hooks.c +++ b/src/hooks.c @@ -3,44 +3,56 @@ * hooks.c * definitions of rel_pathlist and join_pathlist hooks * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ -#include "compat/expand_rte_hook.h" #include "compat/pg_compat.h" -#include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif + +#include "declarative.h" #include "hooks.h" #include "init.h" #include "partition_filter.h" +#include "partition_overseer.h" +#include "partition_router.h" #include "pathman_workers.h" #include "planner_tree_modification.h" -#include "runtimeappend.h" +#include "runtime_append.h" #include "runtime_merge_append.h" #include "utility_stmt_hooking.h" #include "utils.h" #include "xact_handling.h" #include "access/transam.h" +#include "access/xact.h" #include "catalog/pg_authid.h" #include "miscadmin.h" #include "optimizer/cost.h" +#include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "rewrite/rewriteManip.h" -#include "utils/typcache.h" #include "utils/lsyscache.h" +#include "utils/typcache.h" +#include "utils/snapmgr.h" + + +#ifdef USE_ASSERT_CHECKING +#define USE_RELCACHE_LOGGING +#endif /* Borrowed from joinpath.c */ #define PATH_PARAM_BY_REL(path, rel) \ ((path)->param_info && 
bms_overlap(PATH_REQ_OUTER(path), (rel)->relids)) - static inline bool allow_star_schema_join(PlannerInfo *root, Path *outer_path, @@ -58,12 +70,13 @@ allow_star_schema_join(PlannerInfo *root, } -set_join_pathlist_hook_type set_join_pathlist_next = NULL; -set_rel_pathlist_hook_type set_rel_pathlist_hook_next = NULL; -planner_hook_type planner_hook_next = NULL; -post_parse_analyze_hook_type post_parse_analyze_hook_next = NULL; -shmem_startup_hook_type shmem_startup_hook_next = NULL; -ProcessUtility_hook_type process_utility_hook_next = NULL; +set_join_pathlist_hook_type pathman_set_join_pathlist_next = NULL; +set_rel_pathlist_hook_type pathman_set_rel_pathlist_hook_next = NULL; +planner_hook_type pathman_planner_hook_next = NULL; +post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next = NULL; +shmem_startup_hook_type pathman_shmem_startup_hook_next = NULL; +ProcessUtility_hook_type pathman_process_utility_hook_next = NULL; +ExecutorStart_hook_type pathman_executor_start_hook_prev = NULL; /* Take care of joins */ @@ -78,7 +91,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, JoinCostWorkspace workspace; JoinType saved_jointype = jointype; RangeTblEntry *inner_rte = root->simple_rte_array[innerrel->relid]; - const PartRelationInfo *inner_prel; + PartRelationInfo *inner_prel; List *joinclauses, *otherclauses; WalkerContext context; @@ -87,36 +100,86 @@ pathman_join_pathlist_hook(PlannerInfo *root, ListCell *lc; /* Call hooks set by other extensions */ - if (set_join_pathlist_next) - set_join_pathlist_next(root, joinrel, outerrel, - innerrel, jointype, extra); + if (pathman_set_join_pathlist_next) + pathman_set_join_pathlist_next(root, joinrel, outerrel, + innerrel, jointype, extra); /* Check that both pg_pathman & RuntimeAppend nodes are enabled */ if (!IsPathmanReady() || !pg_pathman_enable_runtimeappend) return; - if (jointype == JOIN_FULL || jointype == JOIN_RIGHT) - return; /* we can't handle full or right outer joins */ + /* We should only consider 
base inner relations */ + if (innerrel->reloptkind != RELOPT_BASEREL) + return; - /* Check that innerrel is a BASEREL with inheritors & PartRelationInfo */ - if (innerrel->reloptkind != RELOPT_BASEREL || !inner_rte->inh || - !(inner_prel = get_pathman_relation_info(inner_rte->relid))) - { - return; /* Obviously not our case */ - } + /* We shouldn't process tables with active children */ + if (inner_rte->inh) + return; + + /* We shouldn't process functions etc */ + if (inner_rte->rtekind != RTE_RELATION) + return; + + /* We don't support these join types (since inner will be parameterized) */ + if (jointype == JOIN_FULL || + jointype == JOIN_RIGHT || + jointype == JOIN_UNIQUE_INNER) + return; + + /* Skip if inner table is not allowed to act as parent (e.g. FROM ONLY) */ + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(inner_rte)) + return; + + /* Proceed iff relation 'innerrel' is partitioned */ + if ((inner_prel = get_pathman_relation_info(inner_rte->relid)) == NULL) + return; /* - * These codes are used internally in the planner, but are not supported - * by the executor (nor, indeed, by most of the planner). + * Check if query is: + * 1) UPDATE part_table SET = .. FROM part_table. + * 2) DELETE FROM part_table USING part_table. + * + * Either outerrel or innerrel may be a result relation. */ + if ((root->parse->resultRelation == outerrel->relid || + root->parse->resultRelation == innerrel->relid) && + (root->parse->commandType == CMD_UPDATE || + root->parse->commandType == CMD_DELETE)) + { + int rti = -1, + count = 0; + + /* Inner relation must be partitioned */ + Assert(inner_prel); + + /* Check each base rel of outer relation */ + while ((rti = bms_next_member(outerrel->relids, rti)) >= 0) + { + Oid outer_baserel = root->simple_rte_array[rti]->relid; + + /* Is it partitioned? 
*/ + if (has_pathman_relation_info(outer_baserel)) + count++; + } + + if (count > 0) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("DELETE and UPDATE queries with a join " + "of partitioned tables are not supported"))); + } + + /* Replace virtual join types with a real one */ if (jointype == JOIN_UNIQUE_OUTER || jointype == JOIN_UNIQUE_INNER) - jointype = JOIN_INNER; /* replace with a proper value */ + jointype = JOIN_INNER; /* Extract join clauses which will separate partitions */ if (IS_OUTER_JOIN(extra->sjinfo->jointype)) { - extract_actual_join_clauses(extra->restrictlist, - &joinclauses, &otherclauses); + extract_actual_join_clauses_compat(extra->restrictlist, + joinrel->relids, + &joinclauses, + &otherclauses); } else { @@ -170,11 +233,6 @@ pathman_join_pathlist_hook(PlannerInfo *root, Assert(outer); } - /* No way to do this in a parameterized inner path */ - if (saved_jointype == JOIN_UNIQUE_INNER) - return; - - /* Make inner path depend on outerrel's columns */ required_inner = bms_union(PATH_REQ_OUTER((Path *) cur_inner_path), outerrel->relids); @@ -193,12 +251,12 @@ pathman_join_pathlist_hook(PlannerInfo *root, innerrel->relid))) continue; - inner = create_runtimeappend_path(root, cur_inner_path, ppi, paramsel); + /* Try building RuntimeAppend path, skip if it's not possible */ + inner = create_runtime_append_path(root, cur_inner_path, ppi, paramsel); if (!inner) - return; /* could not build it, retreat! 
*/ - + continue; - required_nestloop = calc_nestloop_required_outer(outer, inner); + required_nestloop = calc_nestloop_required_outer_compat(outer, inner); /* * Check to see if proposed path is still parameterized, and reject if the @@ -211,7 +269,7 @@ pathman_join_pathlist_hook(PlannerInfo *root, ((!bms_overlap(required_nestloop, extra->param_source_rels) && !allow_star_schema_join(root, outer, inner)) || have_dangerous_phv(root, outer->parent->relids, required_inner))) - return; + continue; initial_cost_nestloop_compat(root, &workspace, jointype, outer, inner, extra); @@ -229,16 +287,20 @@ pathman_join_pathlist_hook(PlannerInfo *root, nest_path = create_nestloop_path_compat(root, joinrel, jointype, - &workspace, extra, outer, inner, - filtered_joinclauses, pathkeys, - calc_nestloop_required_outer(outer, inner)); + &workspace, extra, outer, inner, + filtered_joinclauses, pathkeys, + calc_nestloop_required_outer_compat(outer, inner)); /* * NOTE: Override 'rows' value produced by standard estimator. * Currently we use get_parameterized_joinrel_size() since * it works just fine, but this might change some day. */ +#if PG_VERSION_NUM >= 150000 /* for commit 18fea737b5e4 */ + nest_path->jpath.path.rows = +#else nest_path->path.rows = +#endif get_parameterized_joinrel_size_compat(root, joinrel, outer, inner, extra->sjinfo, @@ -247,6 +309,9 @@ pathman_join_pathlist_hook(PlannerInfo *root, /* Finally we can add the new NestLoop path */ add_path(joinrel, (Path *) nest_path); } + + /* Don't forget to close 'inner_prel'! 
*/ + close_pathman_relation_info(inner_prel); } /* Cope with simple relations */ @@ -256,17 +321,34 @@ pathman_rel_pathlist_hook(PlannerInfo *root, Index rti, RangeTblEntry *rte) { - const PartRelationInfo *prel; - int irange_len; + PartRelationInfo *prel; + Relation parent_rel; /* parent's relation (heap) */ + PlanRowMark *parent_rowmark; /* parent's rowmark */ + Oid *children; /* selected children oids */ + List *ranges, /* a list of IndexRanges */ + *wrappers; /* a list of WrapperNodes */ + PathKey *pathkeyAsc = NULL, + *pathkeyDesc = NULL; + double paramsel = 1.0; /* default part selectivity */ + WalkerContext context; + Node *part_expr; + List *part_clauses; + ListCell *lc; + int irange_len, + i; /* Invoke original hook if needed */ - if (set_rel_pathlist_hook_next != NULL) - set_rel_pathlist_hook_next(root, rel, rti, rte); + if (pathman_set_rel_pathlist_hook_next) + pathman_set_rel_pathlist_hook_next(root, rel, rti, rte); /* Make sure that pg_pathman is ready */ if (!IsPathmanReady()) return; + /* We shouldn't process tables with active children */ + if (rte->inh) + return; + /* * Skip if it's a result relation (UPDATE | DELETE | INSERT), * or not a (partitioned) physical relation at all. @@ -276,200 +358,294 @@ pathman_rel_pathlist_hook(PlannerInfo *root, root->parse->resultRelation == rti) return; -/* It's better to exit, since RowMarks might be broken (hook aims to fix them) */ -#ifndef NATIVE_EXPAND_RTE_HOOK +#ifdef LEGACY_ROWMARKS_95 + /* It's better to exit, since RowMarks might be broken */ if (root->parse->commandType != CMD_SELECT && root->parse->commandType != CMD_INSERT) return; + + /* SELECT FOR SHARE/UPDATE is not handled by above check */ + foreach(lc, root->rowMarks) + { + PlanRowMark *rc = (PlanRowMark *) lfirst(lc); + + if (rc->rti == rti) + return; + } #endif /* Skip if this table is not allowed to act as parent (e.g. 
FROM ONLY) */ - if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(root->parse->queryId, rte)) + if (PARENTHOOD_DISALLOWED == get_rel_parenthood_status(rte)) return; /* Proceed iff relation 'rel' is partitioned */ - if ((prel = get_pathman_relation_info(rte->relid)) != NULL) + if ((prel = get_pathman_relation_info(rte->relid)) == NULL) + return; + + /* + * Check that this child is not the parent table itself. + * This is exactly how standard inheritance works. + * + * Helps with queries like this one: + * + * UPDATE test.tmp t SET value = 2 + * WHERE t.id IN (SELECT id + * FROM test.tmp2 t2 + * WHERE id = t.id); + * + * or unions, multilevel partitioning, etc. + * + * Since we disable optimizations on 9.5, we + * have to skip parent table that has already + * been expanded by standard inheritance. + */ + if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL) { - Relation parent_rel; /* parent's relation (heap) */ - Oid *children; /* selected children oids */ - List *ranges, /* a list of IndexRanges */ - *wrappers; /* a list of WrapperNodes */ - PathKey *pathkeyAsc = NULL, - *pathkeyDesc = NULL; - double paramsel = 1.0; /* default part selectivity */ - WalkerContext context; - Node *part_expr; - List *part_clauses; - ListCell *lc; - int i; + foreach (lc, root->append_rel_list) + { + AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc); + Oid child_oid, + parent_oid; - /* Make copy of partitioning expression and fix Var's varno attributes */ - part_expr = PrelExpressionForRelid(prel, rti); + /* Is it actually the same table? */ + child_oid = root->simple_rte_array[appinfo->child_relid]->relid; + parent_oid = root->simple_rte_array[appinfo->parent_relid]->relid; - if (prel->parttype == PT_RANGE) - { /* - * Get pathkeys for ascending and descending sort by partitioned column. + * If there's an 'appinfo', it means that somebody + * (PG?) has already processed this partitioned table + * and added its children to the plan. 
*/ - List *pathkeys; - TypeCacheEntry *tce; - - /* Determine operator type */ - tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); - - /* Make pathkeys */ - pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, - tce->lt_opr, NULL, false); - if (pathkeys) - pathkeyAsc = (PathKey *) linitial(pathkeys); - pathkeys = build_expression_pathkey(root, (Expr *) part_expr, NULL, - tce->gt_opr, NULL, false); - if (pathkeys) - pathkeyDesc = (PathKey *) linitial(pathkeys); + if (appinfo->child_relid == rti && + OidIsValid(appinfo->parent_reloid)) + { + if (child_oid == parent_oid) + goto cleanup; + else if (!has_pathman_relation_info(parent_oid)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("could not expand partitioned table \"%s\"", + get_rel_name(child_oid)), + errhint("Do not use inheritance and pg_pathman partitions together"))); + } } + } - /* HACK: we must restore 'inh' flag! */ - rte->inh = true; - - children = PrelGetChildrenArray(prel); - ranges = list_make1_irange_full(prel, IR_COMPLETE); + /* Make copy of partitioning expression and fix Var's varno attributes */ + part_expr = PrelExpressionForRelid(prel, rti); - /* Make wrappers over restrictions and collect final rangeset */ - InitWalkerContext(&context, part_expr, prel, NULL); - wrappers = NIL; - foreach(lc, rel->baserestrictinfo) - { - WrapperNode *wrap; - RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); + /* Get partitioning-related clauses (do this before append_child_relation()) */ + part_clauses = get_partitioning_clauses(rel->baserestrictinfo, prel, rti); - wrap = walk_expr_tree(rinfo->clause, &context); + if (prel->parttype == PT_RANGE) + { + /* + * Get pathkeys for ascending and descending sort by partitioned column. 
+ */ + List *pathkeys; + TypeCacheEntry *tce; + + /* Determine operator type */ + tce = lookup_type_cache(prel->ev_type, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); + + /* Make pathkeys */ + pathkeys = build_expression_pathkey_compat(root, (Expr *) part_expr, NULL, + tce->lt_opr, NULL, false); + if (pathkeys) + pathkeyAsc = (PathKey *) linitial(pathkeys); + pathkeys = build_expression_pathkey_compat(root, (Expr *) part_expr, NULL, + tce->gt_opr, NULL, false); + if (pathkeys) + pathkeyDesc = (PathKey *) linitial(pathkeys); + } - paramsel *= wrap->paramsel; - wrappers = lappend(wrappers, wrap); - ranges = irange_list_intersection(ranges, wrap->rangeset); - } + children = PrelGetChildrenArray(prel); + ranges = list_make1_irange_full(prel, IR_COMPLETE); - /* Get number of selected partitions */ - irange_len = irange_list_length(ranges); - if (prel->enable_parent) - irange_len++; /* also add parent */ + /* Make wrappers over restrictions and collect final rangeset */ + InitWalkerContext(&context, part_expr, prel, NULL); + wrappers = NIL; + foreach(lc, rel->baserestrictinfo) + { + WrapperNode *wrap; + RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); - /* Expand simple_rte_array and simple_rel_array */ - if (irange_len > 0) - { - int current_len = root->simple_rel_array_size, - new_len = current_len + irange_len; + wrap = walk_expr_tree(rinfo->clause, &context); - /* Expand simple_rel_array */ - root->simple_rel_array = (RelOptInfo **) - repalloc(root->simple_rel_array, - new_len * sizeof(RelOptInfo *)); + paramsel *= wrap->paramsel; + wrappers = lappend(wrappers, wrap); + ranges = irange_list_intersection(ranges, wrap->rangeset); + } - memset((void *) &root->simple_rel_array[current_len], 0, - irange_len * sizeof(RelOptInfo *)); + /* Get number of selected partitions */ + irange_len = irange_list_length(ranges); + if (prel->enable_parent) + irange_len++; /* also add parent */ - /* Expand simple_rte_array */ - root->simple_rte_array = (RangeTblEntry **) - 
repalloc(root->simple_rte_array, - new_len * sizeof(RangeTblEntry *)); + /* Expand simple_rte_array and simple_rel_array */ + if (irange_len > 0) + { + int current_len = root->simple_rel_array_size, + new_len = current_len + irange_len; - memset((void *) &root->simple_rte_array[current_len], 0, - irange_len * sizeof(RangeTblEntry *)); + /* Expand simple_rel_array */ + root->simple_rel_array = (RelOptInfo **) + repalloc(root->simple_rel_array, + new_len * sizeof(RelOptInfo *)); - /* Don't forget to update array size! */ - root->simple_rel_array_size = new_len; - } + memset((void *) &root->simple_rel_array[current_len], 0, + irange_len * sizeof(RelOptInfo *)); - /* Parent has already been locked by rewriter */ - parent_rel = heap_open(rte->relid, NoLock); + /* Expand simple_rte_array */ + root->simple_rte_array = (RangeTblEntry **) + repalloc(root->simple_rte_array, + new_len * sizeof(RangeTblEntry *)); - /* Add parent if asked to */ - if (prel->enable_parent) - append_child_relation(root, parent_rel, rti, 0, rte->relid, NULL); + memset((void *) &root->simple_rte_array[current_len], 0, + irange_len * sizeof(RangeTblEntry *)); +#if PG_VERSION_NUM >= 110000 /* - * Iterate all indexes in rangeset and append corresponding child relations. + * Make sure append_rel_array is wide enough; if it hasn't been + * allocated previously, care to zero out [0; current_len) part. */ - foreach(lc, ranges) - { - IndexRange irange = lfirst_irange(lc); + if (root->append_rel_array == NULL) + root->append_rel_array = (AppendRelInfo **) + palloc0(current_len * sizeof(AppendRelInfo *)); + root->append_rel_array = (AppendRelInfo **) + repalloc(root->append_rel_array, + new_len * sizeof(AppendRelInfo *)); + memset((void *) &root->append_rel_array[current_len], 0, + irange_len * sizeof(AppendRelInfo *)); +#endif - for (i = irange_lower(irange); i <= irange_upper(irange); i++) - append_child_relation(root, parent_rel, rti, i, children[i], wrappers); - } + /* Don't forget to update array size! 
*/ + root->simple_rel_array_size = new_len; + } - /* Now close parent relation */ - heap_close(parent_rel, NoLock); + /* Parent has already been locked by rewriter */ + parent_rel = heap_open_compat(rte->relid, NoLock); - /* Clear path list and make it point to NIL */ - list_free_deep(rel->pathlist); - rel->pathlist = NIL; + parent_rowmark = get_plan_rowmark(root->rowMarks, rti); -#if PG_VERSION_NUM >= 90600 - /* Clear old partial path list */ - list_free(rel->partial_pathlist); - rel->partial_pathlist = NIL; -#endif + /* Add parent if asked to */ + if (prel->enable_parent) + append_child_relation(root, parent_rel, parent_rowmark, + rti, 0, rte->relid, NULL); + + /* Iterate all indexes in rangeset and append child relations */ + foreach(lc, ranges) + { + IndexRange irange = lfirst_irange(lc); + + for (i = irange_lower(irange); i <= irange_upper(irange); i++) + append_child_relation(root, parent_rel, parent_rowmark, + rti, i, children[i], wrappers); + } - /* Generate new paths using the rels we've just added */ - set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); - set_append_rel_size_compat(root, rel, rti); + /* Now close parent relation */ + heap_close_compat(parent_rel, NoLock); + + /* Clear path list and make it point to NIL */ + list_free_deep(rel->pathlist); + rel->pathlist = NIL; #if PG_VERSION_NUM >= 90600 - /* consider gathering partial paths for the parent appendrel */ - generate_gather_paths(root, rel); + /* Clear old partial path list */ + list_free(rel->partial_pathlist); + rel->partial_pathlist = NIL; #endif - /* No need to go further (both nodes are disabled), return */ - if (!(pg_pathman_enable_runtimeappend || - pg_pathman_enable_runtime_merge_append)) - return; + /* Generate new paths using the rels we've just added */ + set_append_rel_pathlist(root, rel, rti, pathkeyAsc, pathkeyDesc); + set_append_rel_size_compat(root, rel, rti); - /* Get partitioning-related clauses */ - part_clauses = 
get_partitioning_clauses(rel->baserestrictinfo, prel, rti); + /* consider gathering partial paths for the parent appendrel */ + generate_gather_paths_compat(root, rel); - /* Skip if there's no PARAMs in partitioning-related clauses */ - if (!clause_contains_params((Node *) part_clauses)) - return; + /* Skip if both custom nodes are disabled */ + if (!(pg_pathman_enable_runtimeappend || + pg_pathman_enable_runtime_merge_append)) + goto cleanup; - /* Generate Runtime[Merge]Append paths if needed */ - foreach (lc, rel->pathlist) - { - AppendPath *cur_path = (AppendPath *) lfirst(lc); - Relids inner_required = PATH_REQ_OUTER((Path *) cur_path); - Path *inner_path = NULL; - ParamPathInfo *ppi; - - /* Skip if rel contains some join-related stuff or path type mismatched */ - if (!(IsA(cur_path, AppendPath) || IsA(cur_path, MergeAppendPath)) || - rel->has_eclass_joins || rel->joininfo) - { - continue; - } + /* Skip if there's no PARAMs in partitioning-related clauses */ + if (!clause_contains_params((Node *) part_clauses)) + goto cleanup; - /* Get existing parameterization */ - ppi = get_appendrel_parampathinfo(rel, inner_required); + /* Generate Runtime[Merge]Append paths if needed */ + foreach (lc, rel->pathlist) + { + AppendPath *cur_path = (AppendPath *) lfirst(lc); + Relids inner_required = PATH_REQ_OUTER((Path *) cur_path); + Path *inner_path = NULL; + ParamPathInfo *ppi; + + /* Skip if rel contains some join-related stuff or path type mismatched */ + if (!(IsA(cur_path, AppendPath) || IsA(cur_path, MergeAppendPath)) || + rel->has_eclass_joins || rel->joininfo) + { + continue; + } - if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) - inner_path = create_runtimeappend_path(root, cur_path, - ppi, paramsel); - else if (IsA(cur_path, MergeAppendPath) && - pg_pathman_enable_runtime_merge_append) - { - /* Check struct layout compatibility */ - if (offsetof(AppendPath, subpaths) != - offsetof(MergeAppendPath, subpaths)) - elog(FATAL, "Struct layouts of 
AppendPath and " - "MergeAppendPath differ"); - - inner_path = create_runtimemergeappend_path(root, cur_path, - ppi, paramsel); - } + /* Get existing parameterization */ + ppi = get_appendrel_parampathinfo(rel, inner_required); - if (inner_path) - add_path(rel, inner_path); + if (IsA(cur_path, AppendPath) && pg_pathman_enable_runtimeappend) + inner_path = create_runtime_append_path(root, cur_path, + ppi, paramsel); + else if (IsA(cur_path, MergeAppendPath) && + pg_pathman_enable_runtime_merge_append) + { + /* Check struct layout compatibility */ + if (offsetof(AppendPath, subpaths) != + offsetof(MergeAppendPath, subpaths)) + elog(FATAL, "Struct layouts of AppendPath and " + "MergeAppendPath differ"); + + inner_path = create_runtime_merge_append_path(root, cur_path, + ppi, paramsel); } + + if (inner_path) + add_path(rel, inner_path); } + +cleanup: + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); +} + +/* + * 'pg_pathman.enable' GUC check. + */ +bool +pathman_enable_check_hook(bool *newval, void **extra, GucSource source) +{ + /* The top level statement requires immediate commit: accept GUC change */ + if (MyXactFlags & XACT_FLAGS_NEEDIMMEDIATECOMMIT) + return true; + + /* Ignore the case of re-setting the same value */ + if (*newval == pathman_init_state.pg_pathman_enable) + return true; + + /* Command must be at top level of a fresh transaction. */ + if (FirstSnapshotSet || + GetTopTransactionIdIfAny() != InvalidTransactionId || +#ifdef PGPRO_EE + getNestLevelATX() > 0 || +#endif + IsSubTransaction()) + { + ereport(WARNING, + (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("\"pg_pathman.enable\" must be called before any query, ignored"))); + + /* Keep the old value. */ + *newval = pathman_init_state.pg_pathman_enable; + } + + return true; } /* @@ -481,15 +657,20 @@ pathman_enable_assign_hook(bool newval, void *extra) elog(DEBUG2, "pg_pathman_enable_assign_hook() [newval = %s] triggered", newval ? 
"true" : "false"); - /* Return quickly if nothing has changed */ - if (newval == (pathman_init_state.pg_pathman_enable && - pathman_init_state.auto_partition && - pathman_init_state.override_copy && - pg_pathman_enable_runtimeappend && - pg_pathman_enable_runtime_merge_append && - pg_pathman_enable_partition_filter && - pg_pathman_enable_bounds_cache)) - return; + if (!(newval == pathman_init_state.pg_pathman_enable && + newval == pathman_init_state.auto_partition && + newval == pathman_init_state.override_copy && + newval == pg_pathman_enable_runtimeappend && + newval == pg_pathman_enable_runtime_merge_append && + newval == pg_pathman_enable_partition_filter && + newval == pg_pathman_enable_bounds_cache)) + { + elog(NOTICE, + "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " + "and some other options have been %s", + newval ? "enabled" : "disabled"); + } + pathman_init_state.auto_partition = newval; pathman_init_state.override_copy = newval; @@ -498,30 +679,69 @@ pathman_enable_assign_hook(bool newval, void *extra) pg_pathman_enable_partition_filter = newval; pg_pathman_enable_bounds_cache = newval; - elog(NOTICE, - "RuntimeAppend, RuntimeMergeAppend and PartitionFilter nodes " - "and some other options have been %s", - newval ? "enabled" : "disabled"); + /* Purge caches if pathman was disabled */ + if (!newval) + { + unload_config(); + } +} + +static void +execute_for_plantree(PlannedStmt *planned_stmt, + Plan *(*proc) (List *rtable, Plan *plan)) +{ + List *subplans = NIL; + ListCell *lc; + Plan *resplan = proc(planned_stmt->rtable, planned_stmt->planTree); + + if (resplan) + planned_stmt->planTree = resplan; + + foreach (lc, planned_stmt->subplans) + { + Plan *subplan = lfirst(lc); + resplan = proc(planned_stmt->rtable, (Plan *) lfirst(lc)); + if (resplan) + subplans = lappend(subplans, resplan); + else + subplans = lappend(subplans, subplan); + } + planned_stmt->subplans = subplans; +} + +/* + * Truncated version of set_plan_refs. 
+ * Pathman can add nodes to already completed and post-processed plan tree. + * reset_plan_node_ids fixes some presentation values for updated plan tree + * to avoid problems in further processing. + */ +static Plan * +reset_plan_node_ids(Plan *plan, void *lastPlanNodeId) +{ + if (plan == NULL) + return NULL; + + plan->plan_node_id = (*(int *) lastPlanNodeId)++; + + return plan; } /* * Planner hook. It disables inheritance for tables that have been partitioned * by pathman to prevent standart PostgreSQL partitioning mechanism from - * handling that tables. + * handling those tables. + * + * Since >= 13 (6aba63ef3e6) query_string parameter was added. */ PlannedStmt * +#if PG_VERSION_NUM >= 130000 +pathman_planner_hook(Query *parse, const char *query_string, int cursorOptions, ParamListInfo boundParams) +#else pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) +#endif { -#define ExecuteForPlanTree(planned_stmt, proc) \ - do { \ - ListCell *lc; \ - proc((planned_stmt)->rtable, (planned_stmt)->planTree); \ - foreach (lc, (planned_stmt)->subplans) \ - proc((planned_stmt)->rtable, (Plan *) lfirst(lc)); \ - } while (0) - PlannedStmt *result; - uint32 query_id = parse->queryId; + uint64 query_id = parse->queryId; /* Save the result in case it changes */ bool pathman_ready = IsPathmanReady(); @@ -530,29 +750,47 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { if (pathman_ready) { - /* Increment relation tags refcount */ - incr_refcount_relation_tags(); + /* Increase planner() calls count */ + incr_planner_calls_count(); /* Modify query tree if needed */ pathman_transform_query(parse, boundParams); } /* Invoke original hook if needed */ - if (planner_hook_next) - result = planner_hook_next(parse, cursorOptions, boundParams); + if (pathman_planner_hook_next) +#if PG_VERSION_NUM >= 130000 + result = pathman_planner_hook_next(parse, query_string, cursorOptions, boundParams); +#else + result = 
pathman_planner_hook_next(parse, cursorOptions, boundParams); +#endif else +#if PG_VERSION_NUM >= 130000 + result = standard_planner(parse, query_string, cursorOptions, boundParams); +#else result = standard_planner(parse, cursorOptions, boundParams); +#endif if (pathman_ready) { - /* Give rowmark-related attributes correct names */ - ExecuteForPlanTree(result, postprocess_lock_rows); + int lastPlanNodeId = 0; + ListCell *l; /* Add PartitionFilter node for INSERT queries */ - ExecuteForPlanTree(result, add_partition_filters); + execute_for_plantree(result, add_partition_filters); + + /* Add PartitionRouter node for UPDATE queries */ + execute_for_plantree(result, add_partition_routers); - /* Decrement relation tags refcount */ - decr_refcount_relation_tags(); + /* Decrement planner() calls count */ + decr_planner_calls_count(); + + /* remake parsed tree presentation fixes due to possible adding nodes */ + result->planTree = plan_tree_visitor(result->planTree, reset_plan_node_ids, &lastPlanNodeId); + foreach(l, result->subplans) + { + lfirst(l) = plan_tree_visitor((Plan *) lfirst(l), reset_plan_node_ids, &lastPlanNodeId); + } /* HACK: restore queryId set by pg_stat_statements */ result->queryId = query_id; @@ -563,8 +801,8 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) { if (pathman_ready) { - /* Caught an ERROR, decrease refcount */ - decr_refcount_relation_tags(); + /* Caught an ERROR, decrease count */ + decr_planner_calls_count(); } /* Rethrow ERROR further */ @@ -578,48 +816,73 @@ pathman_planner_hook(Query *parse, int cursorOptions, ParamListInfo boundParams) /* * Post parse analysis hook. It makes sure the config is loaded before executing - * any statement, including utility commands + * any statement, including utility commands. 
+ */ +#if PG_VERSION_NUM >= 140000 +/* + * pathman_post_parse_analyze_hook(), pathman_post_parse_analyze_hook_next(): + * in 14 new argument was added (5fd9dfa5f50) */ void -pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) +pathman_post_parse_analyze_hook(ParseState *pstate, Query *query, JumbleState *jstate) +{ + /* Invoke original hook if needed */ + if (pathman_post_parse_analyze_hook_next) + pathman_post_parse_analyze_hook_next(pstate, query, jstate); +#else +void +pathman_post_parse_analyze_hook(ParseState *pstate, Query *query) { /* Invoke original hook if needed */ - if (post_parse_analyze_hook_next) - post_parse_analyze_hook_next(pstate, query); + if (pathman_post_parse_analyze_hook_next) + pathman_post_parse_analyze_hook_next(pstate, query); +#endif - /* Hooks can be disabled */ + /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; - /* Finish delayed invalidation jobs */ - if (IsPathmanReady()) - finish_delayed_invalidation(); + /* We shouldn't proceed on: ... */ + if (query->commandType == CMD_UTILITY) + { + /* ... BEGIN */ + if (xact_is_transaction_stmt(query->utilityStmt)) + return; - /* - * We shouldn't proceed on: - * BEGIN - * SET [TRANSACTION] - */ - if (query->commandType == CMD_UTILITY && - (xact_is_transaction_stmt(query->utilityStmt) || - xact_is_set_stmt(query->utilityStmt))) - return; + /* ... SET pg_pathman.enable */ + if (xact_is_set_stmt(query->utilityStmt, PATHMAN_ENABLE)) + { + /* Accept all events in case it's "enable = OFF" */ + if (IsPathmanReady()) + finish_delayed_invalidation(); - /* - * We should also disable pg_pathman on: - * ALTER EXTENSION pg_pathman - */ - if (query->commandType == CMD_UTILITY && - xact_is_alter_pathman_stmt(query->utilityStmt)) - { - /* Disable pg_pathman to perform a painless update */ - (void) set_config_option(PATHMAN_ENABLE, "off", - PGC_SUSET, PGC_S_SESSION, - GUC_ACTION_SAVE, true, 0, false); + return; + } - return; + /* ... 
SET [TRANSACTION] */ + if (xact_is_set_stmt(query->utilityStmt, NULL)) + return; + + /* ... ALTER EXTENSION pg_pathman */ + if (xact_is_alter_pathman_stmt(query->utilityStmt)) + { + /* Leave no delayed events before ALTER EXTENSION */ + if (IsPathmanReady()) + finish_delayed_invalidation(); + + /* Disable pg_pathman to perform a painless update */ + (void) set_config_option(PATHMAN_ENABLE, "off", + PGC_SUSET, PGC_S_SESSION, + GUC_ACTION_SAVE, true, 0, false); + + return; + } } + /* Finish all delayed invalidation jobs */ + if (IsPathmanReady()) + finish_delayed_invalidation(); + /* Load config if pg_pathman exists & it's still necessary */ if (IsPathmanEnabled() && !IsPathmanInitialized() && @@ -628,12 +891,14 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) { load_config(); /* perform main cache initialization */ } + if (!IsPathmanReady()) + return; /* Process inlined SQL functions (we've already entered planning stage) */ - if (IsPathmanReady() && get_refcount_relation_tags() > 0) + if (IsPathmanReady() && get_planner_calls_count() > 0) { /* Check that pg_pathman is the last extension loaded */ - if (post_parse_analyze_hook != pathman_post_parse_analysis_hook) + if (post_parse_analyze_hook != pathman_post_parse_analyze_hook) { Oid save_userid; int save_sec_context; @@ -674,7 +939,16 @@ pathman_post_parse_analysis_hook(ParseState *pstate, Query *query) /* Modify query tree if needed */ pathman_transform_query(query, NULL); + return; } + +#if PG_VERSION_NUM >= 100000 + /* + * for now this call works only for declarative partitioning so + * we disabled it + */ + pathman_post_analyze_query(query); +#endif } /* @@ -684,8 +958,8 @@ void pathman_shmem_startup_hook(void) { /* Invoke original hook if needed */ - if (shmem_startup_hook_next != NULL) - shmem_startup_hook_next(); + if (pathman_shmem_startup_hook_next) + pathman_shmem_startup_hook_next(); /* Allocate shared memory objects */ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); @@ -699,81 
+973,85 @@ pathman_shmem_startup_hook(void) void pathman_relcache_hook(Datum arg, Oid relid) { - PartParentSearch search; - Oid partitioned_table; + Oid pathman_config_relid; - /* Hooks can be disabled */ + /* See cook_partitioning_expression() */ if (!pathman_hooks_enabled) return; if (!IsPathmanReady()) return; - /* We shouldn't even consider special OIDs */ - if (relid < FirstNormalObjectId) - return; + /* Invalidation event for whole cache */ + if (relid == InvalidOid) + { + invalidate_bounds_cache(); + invalidate_parents_cache(); + invalidate_status_cache(); + delay_pathman_shutdown(); /* see below */ + } - /* Invalidation event for PATHMAN_CONFIG table (probably DROP) */ - if (relid == get_pathman_config_relid(false)) + /* + * Invalidation event for PATHMAN_CONFIG table (probably DROP EXTENSION). + * Digging catalogs here is expensive and probably illegal, so we take + * cached relid. It is possible that we don't know it atm (e.g. pathman + * was disabled). However, in this case caches must have been cleaned + * on disable, and there is no DROP-specific additional actions. 
+ */ + pathman_config_relid = get_pathman_config_relid(true); + if (relid == pathman_config_relid) + { delay_pathman_shutdown(); + } - /* Invalidate PartBoundInfo cache if needed */ - forget_bounds_of_partition(relid); - - /* Invalidate PartParentInfo cache if needed */ - partitioned_table = forget_parent_of_partition(relid, &search); - - switch (search) + /* Invalidation event for some user table */ + else if (relid >= FirstNormalObjectId) { - /* It is (or was) a valid partition */ - case PPS_ENTRY_PART_PARENT: - case PPS_ENTRY_PARENT: - { - elog(DEBUG2, "Invalidation message for partition %u [%u]", - relid, MyProcPid); - - delay_invalidation_parent_rel(partitioned_table); - } - break; - - /* Both syscache and pathman's cache say it isn't a partition */ - case PPS_ENTRY_NOT_FOUND: - { - Assert(partitioned_table == InvalidOid); - - /* Which means that 'relid' might be parent */ - if (relid != InvalidOid) - delay_invalidation_vague_rel(relid); -#ifdef NOT_USED - elog(DEBUG2, "Invalidation message for relation %u [%u]", - relid, MyProcPid); -#endif - } - break; + /* Invalidate PartBoundInfo entry if needed */ + forget_bounds_of_rel(relid); - /* We can't say anything (state is not transactional) */ - case PPS_NOT_SURE: - { - elog(DEBUG2, "Invalidation message for vague relation %u [%u]", - relid, MyProcPid); - - delay_invalidation_vague_rel(relid); - } - break; + /* Invalidate PartStatusInfo entry if needed */ + forget_status_of_relation(relid); - default: - elog(ERROR, "Not implemented yet (%s)", - CppAsString(pathman_relcache_hook)); - break; + /* Invalidate PartParentInfo entry if needed */ + forget_parent_of_partition(relid); } } /* * Utility function invoker hook. * NOTE: 'first_arg' is (PlannedStmt *) in PG 10, or (Node *) in PG <= 9.6. 
+ * In PG 13 (2f9661311b8) command completion tags was reworked (added QueryCompletion struct) */ void -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +/* + * pathman_post_parse_analyze_hook(), pathman_post_parse_analyze_hook_next(): + * in 14 new argument was added (5fd9dfa5f50) + */ +pathman_process_utility_hook(PlannedStmt *first_arg, + const char *queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, QueryCompletion *queryCompletion) +{ + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; +#elif PG_VERSION_NUM >= 130000 +pathman_process_utility_hook(PlannedStmt *first_arg, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, QueryCompletion *queryCompletion) +{ + Node *parsetree = first_arg->utilityStmt; + int stmt_location = first_arg->stmt_location, + stmt_len = first_arg->stmt_len; +#elif PG_VERSION_NUM >= 100000 pathman_process_utility_hook(PlannedStmt *first_arg, const char *queryString, ProcessUtilityContext context, @@ -802,6 +1080,7 @@ pathman_process_utility_hook(Node *first_arg, Oid relation_oid; PartType part_type; AttrNumber attr_number; + bool is_parent; /* Override standard COPY statement if needed */ if (is_pathman_related_copy(parsetree)) @@ -811,19 +1090,29 @@ pathman_process_utility_hook(Node *first_arg, /* Handle our COPY case (and show a special cmd name) */ PathmanDoCopy((CopyStmt *) parsetree, queryString, stmt_location, stmt_len, &processed); +#if PG_VERSION_NUM >= 130000 + if (queryCompletion) + SetQueryCompletion(queryCompletion, CMDTAG_COPY, processed); +#else if (completionTag) snprintf(completionTag, COMPLETION_TAG_BUFSIZE, - "PATHMAN COPY " UINT64_FORMAT, processed); + "COPY " UINT64_FORMAT, processed); +#endif return; /* don't call standard_ProcessUtility() or hooks */ } /* 
Override standard RENAME statement if needed */ else if (is_pathman_related_table_rename(parsetree, - &relation_oid)) + &relation_oid, + &is_parent)) { - PathmanRenameConstraint(relation_oid, - (const RenameStmt *) parsetree); + const RenameStmt *rename_stmt = (const RenameStmt *) parsetree; + + if (is_parent) + PathmanRenameSequence(relation_oid, rename_stmt); + else + PathmanRenameConstraint(relation_oid, rename_stmt); } /* Override standard ALTER COLUMN TYPE statement if needed */ @@ -837,19 +1126,115 @@ pathman_process_utility_hook(Node *first_arg, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot change type of column \"%s\"" " of table \"%s\" partitioned by HASH", - get_attname(relation_oid, attr_number), + get_attname_compat(relation_oid, attr_number), get_rel_name(relation_oid)))); - - /* Don't forget to invalidate parsed partitioning expression */ - pathman_config_invalidate_parsed_expression(relation_oid); } +#ifdef ENABLE_DECLARATIVE + else if (is_pathman_related_partitioning_cmd(parsetree, &relation_oid)) + { + /* we can handle all the partitioning commands in ALTER .. TABLE */ + if (IsA(parsetree, AlterTableStmt)) + { + ListCell *lc; + AlterTableStmt *stmt = (AlterTableStmt *) parsetree; + + foreach(lc, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc); + switch (cmd->subtype) + { + case AT_AttachPartition: + handle_attach_partition(relation_oid, cmd); + return; + case AT_DetachPartition: + handle_detach_partition(cmd); + return; + default: + elog(ERROR, "can't handle this command"); + } + } + } + else if (IsA(parsetree, CreateStmt)) + { + handle_create_partition_of(relation_oid, (CreateStmt *) parsetree); + return; + } + } +#endif } /* Finally call process_utility_hook_next or standard_ProcessUtility */ - call_process_utility_compat((process_utility_hook_next ? - process_utility_hook_next : +#if PG_VERSION_NUM >= 140000 + call_process_utility_compat((pathman_process_utility_hook_next ? 
+ pathman_process_utility_hook_next : + standard_ProcessUtility), + first_arg, queryString, + readOnlyTree, + context, params, queryEnv, + dest, queryCompletion); +#elif PG_VERSION_NUM >= 130000 + call_process_utility_compat((pathman_process_utility_hook_next ? + pathman_process_utility_hook_next : + standard_ProcessUtility), + first_arg, queryString, + context, params, queryEnv, + dest, queryCompletion); +#else + call_process_utility_compat((pathman_process_utility_hook_next ? + pathman_process_utility_hook_next : standard_ProcessUtility), first_arg, queryString, context, params, queryEnv, dest, completionTag); +#endif +} + +/* + * Planstate tree nodes could have been copied. + * It breaks references on correspoding + * ModifyTable node from PartitionRouter nodes. + */ +static void +fix_mt_refs(PlanState *state, void *context) +{ + ModifyTableState *mt_state = (ModifyTableState *) state; + PartitionRouterState *pr_state; +#if PG_VERSION_NUM < 140000 + int i; +#endif + + if (!IsA(state, ModifyTableState)) + return; +#if PG_VERSION_NUM >= 140000 + { + CustomScanState *pf_state = (CustomScanState *) outerPlanState(mt_state); +#else + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; +#endif + if (IsPartitionFilterState(pf_state)) + { + pr_state = linitial(pf_state->custom_ps); + if (IsPartitionRouterState(pr_state)) + { + pr_state->mt_state = mt_state; + } + } + } +} + +void +pathman_executor_start_hook(QueryDesc *queryDesc, int eflags) +{ + if (pathman_executor_start_hook_prev) + pathman_executor_start_hook_prev(queryDesc, eflags); + else + standard_ExecutorStart(queryDesc, eflags); + + /* + * HACK for compatibility with pgpro_stats. + * Fix possibly broken planstate tree. 
+ */ + state_tree_visitor(queryDesc->planstate, fix_mt_refs, NULL); } diff --git a/src/include/compat/debug_compat_features.h b/src/include/compat/debug_compat_features.h index c668d4ce..09f12849 100644 --- a/src/include/compat/debug_compat_features.h +++ b/src/include/compat/debug_compat_features.h @@ -12,9 +12,4 @@ #define ENABLE_PGPRO_PATCHES /* PgPro exclusive features */ -//#define ENABLE_EXPAND_RTE_HOOK -//#define ENABLE_RELATION_TAGS #define ENABLE_PATHMAN_AWARE_COPY_WIN32 - -/* Hacks for vanilla */ -#define ENABLE_ROWMARKS_FIX diff --git a/src/include/compat/expand_rte_hook.h b/src/include/compat/expand_rte_hook.h deleted file mode 100644 index 51b57dd3..00000000 --- a/src/include/compat/expand_rte_hook.h +++ /dev/null @@ -1,37 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * expand_rte_hook.h - * Fix rowmarks etc using the 'expand_inherited_rtentry_hook' - * NOTE: this hook exists in PostgresPro - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef EXPAND_RTE_HOOK_H -#define EXPAND_RTE_HOOK_H - -#include "compat/debug_compat_features.h" - - -/* Does PostgreSQL have 'expand_inherited_rtentry_hook'? */ -/* TODO: fix this definition once PgPro contains 'expand_rte_hook' patch */ -#if defined(ENABLE_PGPRO_PATCHES) && \ - defined(ENABLE_EXPAND_RTE_HOOK) /* && ... 
*/ -#define NATIVE_EXPAND_RTE_HOOK -#endif - - -#ifdef NATIVE_EXPAND_RTE_HOOK - -void init_expand_rte_hook(void); - -#else - -#define init_expand_rte_hook() ( (void) true ) - -#endif /* NATIVE_EXPAND_RTE_HOOK */ - - -#endif /* EXPAND_RTE_HOOK_H */ diff --git a/src/include/compat/pg_compat.h b/src/include/compat/pg_compat.h index 8dcc339a..f6330627 100644 --- a/src/include/compat/pg_compat.h +++ b/src/include/compat/pg_compat.h @@ -3,7 +3,7 @@ * pg_compat.h * Compatibility tools for PostgreSQL API * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -22,12 +22,24 @@ #include "compat/debug_compat_features.h" #include "postgres.h" +#include "access/tupdesc.h" +#include "commands/trigger.h" #include "executor/executor.h" #include "nodes/memnodes.h" +#include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/pathnodes.h" +#else #include "nodes/relation.h" +#endif #include "nodes/pg_list.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/appendinfo.h" +#endif #include "optimizer/cost.h" #include "optimizer/paths.h" +#include "optimizer/pathnode.h" +#include "optimizer/prep.h" #include "utils/memutils.h" /* @@ -36,33 +48,97 @@ * ---------- */ +/* + * get_attname() + */ +#if PG_VERSION_NUM >= 110000 +#define get_attname_compat(relid, attnum) \ + get_attname((relid), (attnum), false) +#else +#define get_attname_compat(relid, attnum) \ + get_attname((relid), (attnum)) +#endif + + +/* + * calc_nestloop_required_outer + */ +#if PG_VERSION_NUM >= 110000 +#define calc_nestloop_required_outer_compat(outer, inner) \ + calc_nestloop_required_outer((outer)->parent->relids, PATH_REQ_OUTER(outer), \ + (inner)->parent->relids, PATH_REQ_OUTER(inner)) +#else +#define calc_nestloop_required_outer_compat(outer, inner) \ + calc_nestloop_required_outer((outer), (inner)) +#endif + /* * adjust_appendrel_attrs() */ -#if PG_VERSION_NUM >= 90600 +#if 
PG_VERSION_NUM >= 110000 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + (node), \ + 1, &(appinfo)) +#elif PG_VERSION_NUM >= 90500 +#define adjust_appendrel_attrs_compat(root, node, appinfo) \ + adjust_appendrel_attrs((root), \ + (node), \ + (appinfo)) +#endif + + +#if PG_VERSION_NUM >= 110000 #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ (dst_rel)->reltarget->exprs = (List *) \ adjust_appendrel_attrs((root), \ (Node *) (src_rel)->reltarget->exprs, \ - (appinfo)); \ + 1, \ + &(appinfo)); \ + } while (0) +#elif PG_VERSION_NUM >= 90600 +#define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ + do { \ + (dst_rel)->reltarget->exprs = (List *) \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltarget->exprs, \ + (appinfo)); \ } while (0) #elif PG_VERSION_NUM >= 90500 #define adjust_rel_targetlist_compat(root, dst_rel, src_rel, appinfo) \ do { \ (dst_rel)->reltargetlist = (List *) \ - adjust_appendrel_attrs((root), \ - (Node *) (src_rel)->reltargetlist, \ - (appinfo)); \ + adjust_appendrel_attrs((root), \ + (Node *) (src_rel)->reltargetlist, \ + (appinfo)); \ } while (0) #endif +/* + * CheckValidResultRel() + */ +#if PG_VERSION_NUM >= 170000 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd), NIL) +#elif PG_VERSION_NUM >= 100000 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri), (cmd)) +#elif PG_VERSION_NUM >= 90500 +#define CheckValidResultRelCompat(rri, cmd) \ + CheckValidResultRel((rri)->ri_RelationDesc, (cmd)) +#endif /* * BeginCopyFrom() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define BeginCopyFromCompat(pstate, rel, filename, is_program, data_source_cb, \ + attnamelist, options) \ + BeginCopyFrom((pstate), (rel), NULL, (filename), (is_program), \ + (data_source_cb), (attnamelist), (options)) +#elif PG_VERSION_NUM >= 100000 #define BeginCopyFromCompat(pstate, rel, filename, 
is_program, data_source_cb, \ attnamelist, options) \ BeginCopyFrom((pstate), (rel), (filename), (is_program), \ @@ -106,7 +182,14 @@ * - in pg 10 PlannedStmt object * - in pg 9.6 and lower Node parsetree */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define call_process_utility_compat(process_utility, first_arg, query_string, \ + readOnlyTree, context, params, query_env, \ + dest, completion_tag) \ + (process_utility)((first_arg), (query_string), readOnlyTree, \ + (context), (params), \ + (query_env), (dest), (completion_tag)) +#elif PG_VERSION_NUM >= 100000 #define call_process_utility_compat(process_utility, first_arg, query_string, \ context, params, query_env, dest, \ completion_tag) \ @@ -160,30 +243,79 @@ /* * create_append_path() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, -1) +#elif PG_VERSION_NUM >= 130000 +/* + * PGPRO-3938 made create_append_path compatible with vanilla again + */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#elif PG_VERSION_NUM >= 120000 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#else +/* TODO pgpro version? 
Looks like something is not ported yet */ +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false) +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 110000 + +#ifndef PGPRO_VERSION +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1) +#else +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path(NULL, (rel), (subpaths), NIL, (required_outer), \ + (parallel_workers), false, NIL, -1, false, NIL) +#endif /* PGPRO_VERSION */ + +#elif PG_VERSION_NUM >= 100000 + +#ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL) +#else +#define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ + create_append_path((rel), (subpaths), (required_outer), (parallel_workers), NIL, \ + false, NIL) +#endif /* PGPRO_VERSION */ + #elif PG_VERSION_NUM >= 90600 #ifndef PGPRO_VERSION #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), (parallel_workers)) -#else /* ifdef PGPRO_VERSION */ +#else #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer), \ false, NIL, (parallel_workers)) -#endif /* PGPRO_VERSION */ +#endif /* PGPRO_VERSION */ #elif PG_VERSION_NUM >= 90500 #define create_append_path_compat(rel, subpaths, required_outer, parallel_workers) \ create_append_path((rel), (subpaths), (required_outer)) -#endif /* PG_VERSION_NUM */ +#endif /* PG_VERSION_NUM */ /* * create_merge_append_path() */ -#if PG_VERSION_NUM >= 100000 
+#if PG_VERSION_NUM >= 140000 +#define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ + required_outer) \ + create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ + (required_outer)) +#elif PG_VERSION_NUM >= 100000 #define create_merge_append_path_compat(root, rel, subpaths, pathkeys, \ required_outer) \ create_merge_append_path((root), (rel), (subpaths), (pathkeys), \ @@ -199,7 +331,7 @@ /* * create_nestloop_path() */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 100000 || (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) #define create_nestloop_path_compat(root, joinrel, jointype, workspace, extra, \ outer, inner, filtered_joinclauses, pathkeys, \ required_outer) \ @@ -256,7 +388,7 @@ extern void create_plain_partial_paths(PlannerInfo *root, /* - * ExecBuildProjectionInfo + * ExecBuildProjectionInfo() */ #if PG_VERSION_NUM >= 100000 #define ExecBuildProjectionInfoCompat(targetList, econtext, resultSlot, \ @@ -274,27 +406,74 @@ extern void create_plain_partial_paths(PlannerInfo *root, /* * ExecEvalExpr() - * NOTE: 'errmsg' specifies error string when ExecEvalExpr returns multiple values. 
*/ #if PG_VERSION_NUM >= 100000 -#define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ +#define ExecEvalExprCompat(expr, econtext, isNull) \ ExecEvalExpr((expr), (econtext), (isNull)) #elif PG_VERSION_NUM >= 90500 -#include "partition_filter.h" +static inline Datum +ExecEvalExprCompat(ExprState *expr, ExprContext *econtext, bool *isnull) +{ + ExprDoneCond isdone; + Datum result = ExecEvalExpr(expr, econtext, isnull, &isdone); + + if (isdone != ExprSingleResult) + elog(ERROR, "expression should return single value"); + + return result; +} +#endif + -/* Variables for ExecEvalExprCompat() */ -extern Datum exprResult; -extern ExprDoneCond isDone; +/* + * ExecCheck() + */ +#if PG_VERSION_NUM < 100000 +static inline bool +ExecCheck(ExprState *state, ExprContext *econtext) +{ + Datum ret; + bool isnull; + MemoryContext old_mcxt; + + /* short-circuit (here and in ExecInitCheck) for empty restriction list */ + if (state == NULL) + return true; + + old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + ret = ExecEvalExprCompat(state, econtext, &isnull); + MemoryContextSwitchTo(old_mcxt); + + if (isnull) + return true; + + return DatumGetBool(ret); +} +#endif -/* Error handlers */ -static inline void mult_result_handler() { elog(ERROR, ERR_PART_ATTR_MULTIPLE_RESULTS); } -#define ExecEvalExprCompat(expr, econtext, isNull, errHandler) \ -( \ - exprResult = ExecEvalExpr((expr), (econtext), (isNull), &isDone), \ - (isDone != ExprSingleResult) ? 
(errHandler)() : (0), \ - exprResult \ -) +/* + * extract_actual_join_clauses() + */ +#if (PG_VERSION_NUM >= 100003) || \ + (PG_VERSION_NUM < 100000 && PG_VERSION_NUM >= 90609) || \ + (PG_VERSION_NUM < 90600 && PG_VERSION_NUM >= 90513) +#define extract_actual_join_clauses_compat(restrictinfo_list, \ + joinrelids, \ + joinquals, \ + otherquals) \ + extract_actual_join_clauses((restrictinfo_list), \ + (joinrelids), \ + (joinquals), \ + (otherquals)) +#else +#define extract_actual_join_clauses_compat(restrictinfo_list, \ + joinrelids, \ + joinquals, \ + otherquals) \ + extract_actual_join_clauses((restrictinfo_list), \ + (joinquals), \ + (otherquals)) #endif @@ -351,12 +530,12 @@ extern List *get_all_actual_clauses(List *restrictinfo_list); * get_rel_persistence() */ #if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 -char get_rel_persistence(Oid relid); +char get_rel_persistence(Oid relid); #endif /* - * initial_cost_nestloop + * initial_cost_nestloop() */ #if PG_VERSION_NUM >= 100000 || (defined(PGPRO_VERSION) && PG_VERSION_NUM >= 90603) #define initial_cost_nestloop_compat(root, workspace, jointype, outer_path, \ @@ -372,7 +551,7 @@ char get_rel_persistence(Oid relid); /* - * InitResultRelInfo + * InitResultRelInfo() * * for v10 set NULL into 'partition_root' argument to specify that result * relation is not vanilla partition @@ -390,12 +569,22 @@ char get_rel_persistence(Oid relid); #endif +/* + * ItemPointerIndicatesMovedPartitions() + * + * supported since v11, provide a stub for previous versions. 
+ */ +#if PG_VERSION_NUM < 110000 +#define ItemPointerIndicatesMovedPartitions(ctid) ( false ) +#endif + + /* * make_restrictinfo() */ #if PG_VERSION_NUM >= 100000 -extern List * make_restrictinfos_from_actual_clauses(PlannerInfo *root, - List *clause_list); +extern List *make_restrictinfos_from_actual_clauses(PlannerInfo *root, + List *clause_list); #endif @@ -418,9 +607,9 @@ extern Result *make_result(List *tlist, * McxtStatsInternal() */ #if PG_VERSION_NUM >= 90600 -void McxtStatsInternal(MemoryContext context, int level, - bool examine_children, - MemoryContextCounters *totals); +void McxtStatsInternal(MemoryContext context, int level, + bool examine_children, + MemoryContextCounters *totals); #endif @@ -428,7 +617,7 @@ void McxtStatsInternal(MemoryContext context, int level, * oid_cmp() */ #if PG_VERSION_NUM >=90500 && PG_VERSION_NUM < 100000 -extern int oid_cmp(const void *p1, const void *p2); +extern int oid_cmp(const void *p1, const void *p2); #endif @@ -437,7 +626,12 @@ extern int oid_cmp(const void *p1, const void *p2); * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ + query_env) \ + parse_analyze_fixedparams((RawStmt *) (parse_tree), (query_string), (param_types), \ + (nparams), (query_env)) +#elif PG_VERSION_NUM >= 100000 #define parse_analyze_compat(parse_tree, query_string, param_types, nparams, \ query_env) \ parse_analyze((RawStmt *) (parse_tree), (query_string), (param_types), \ @@ -451,11 +645,16 @@ extern int oid_cmp(const void *p1, const void *p2); /* - * pg_analyze_and_rewrite + * pg_analyze_and_rewrite() * * for v10 cast first arg to RawStmt type */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 150000 /* for commit 791b1b71da35 */ +#define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ + nparams, query_env) \ + 
pg_analyze_and_rewrite_fixedparams((RawStmt *) (parsetree), (query_string), \ + (param_types), (nparams), (query_env)) +#elif PG_VERSION_NUM >= 100000 #define pg_analyze_and_rewrite_compat(parsetree, query_string, param_types, \ nparams, query_env) \ pg_analyze_and_rewrite((RawStmt *) (parsetree), (query_string), \ @@ -469,11 +668,24 @@ extern int oid_cmp(const void *p1, const void *p2); /* - * ProcessUtility + * ProcessUtility() * * for v10 set NULL into 'queryEnv' argument */ -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +#define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ + completionTag) \ + do { \ + PlannedStmt *stmt = makeNode(PlannedStmt); \ + stmt->commandType = CMD_UTILITY; \ + stmt->canSetTag = true; \ + stmt->utilityStmt = (parsetree); \ + stmt->stmt_location = -1; \ + stmt->stmt_len = 0; \ + ProcessUtility(stmt, (queryString), false, (context), (params), NULL, \ + (dest), (completionTag)); \ + } while (0) +#elif PG_VERSION_NUM >= 100000 #define ProcessUtilityCompat(parsetree, queryString, context, params, dest, \ completionTag) \ do { \ @@ -510,7 +722,7 @@ extern int oid_cmp(const void *p1, const void *p2); * set_dummy_rel_pathlist() */ #if PG_VERSION_NUM >= 90500 && PG_VERSION_NUM < 90600 -void set_dummy_rel_pathlist(RelOptInfo *rel); +void set_dummy_rel_pathlist(RelOptInfo *rel); #endif @@ -532,6 +744,10 @@ extern void set_rel_consider_parallel(PlannerInfo *root, * in compat version the type of first argument is (Expr *) */ #if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 /* function removed in + * 375398244168add84a884347625d14581a421e71 */ +extern TargetEntry *tlist_member_ignore_relabel(Expr *node, List *targetlist); +#endif #define tlist_member_ignore_relabel_compat(expr, targetlist) \ tlist_member_ignore_relabel((expr), (targetlist)) #elif PG_VERSION_NUM >= 90500 @@ -540,13 +756,493 @@ extern void set_rel_consider_parallel(PlannerInfo *root, #endif +/* + * convert_tuples_by_name_map() + */ +#if 
(PG_VERSION_NUM >= 90500 && PG_VERSION_NUM <= 90505) || \ + (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM <= 90601) +extern AttrNumber *convert_tuples_by_name_map(TupleDesc indesc, + TupleDesc outdesc, + const char *msg); +#else +#include "access/tupconvert.h" +#endif + +/* + * ExecBRUpdateTriggers() + */ +#if PG_VERSION_NUM >= 160000 +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot), NULL, NULL) +#elif PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot), NULL) +#else +#define ExecBRUpdateTriggersCompat(estate, epqstate, relinfo, \ + tupleid, fdw_trigtuple, newslot) \ + ExecBRUpdateTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (newslot)) +#endif + +/* + * ExecARInsertTriggers() + */ +#if PG_VERSION_NUM >= 100000 +#define ExecARInsertTriggersCompat(estate, relinfo, trigtuple, \ + recheck_indexes, transition_capture) \ + ExecARInsertTriggers((estate), (relinfo), (trigtuple), \ + (recheck_indexes), (transition_capture)) +#elif PG_VERSION_NUM >= 90500 +#define ExecARInsertTriggersCompat(estate, relinfo, trigtuple, \ + recheck_indexes, transition_capture) \ + ExecARInsertTriggers((estate), (relinfo), (trigtuple), (recheck_indexes)) +#endif + + +/* + * ExecBRDeleteTriggers() + */ +#if PG_VERSION_NUM >= 160000 +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple), (epqslot), NULL, NULL) +#elif PG_VERSION_NUM >= 110000 +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), 
(relinfo), (tupleid), \ + (fdw_trigtuple), (epqslot)) +#else +#define ExecBRDeleteTriggersCompat(estate, epqstate, relinfo, tupleid, \ + fdw_trigtuple, epqslot) \ + ExecBRDeleteTriggers((estate), (epqstate), (relinfo), (tupleid), \ + (fdw_trigtuple)) +#endif + + +/* + * ExecARDeleteTriggers() + */ +#if PG_VERSION_NUM >= 150000 /* for commit ba9a7e392171 */ +#define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ + fdw_trigtuple, transition_capture) \ + ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ + (fdw_trigtuple), (transition_capture), false) +#elif PG_VERSION_NUM >= 100000 +#define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ + fdw_trigtuple, transition_capture) \ + ExecARDeleteTriggers((estate), (relinfo), (tupleid), \ + (fdw_trigtuple), (transition_capture)) +#elif PG_VERSION_NUM >= 90500 +#define ExecARDeleteTriggersCompat(estate, relinfo, tupleid, \ + fdw_trigtuple, transition_capture) \ + ExecARDeleteTriggers((estate), (relinfo), (tupleid), (fdw_trigtuple)) +#endif + + +/* + * ExecASInsertTriggers() + */ +#if PG_VERSION_NUM >= 100000 +#define ExecASInsertTriggersCompat(estate, relinfo, transition_capture) \ + ExecASInsertTriggers((estate), (relinfo), (transition_capture)) +#elif PG_VERSION_NUM >= 90500 +#define ExecASInsertTriggersCompat(estate, relinfo, transition_capture) \ + ExecASInsertTriggers((estate), (relinfo)) +#endif + + +/* + * map_variable_attnos() + */ +#if PG_VERSION_NUM >= 100000 +#define map_variable_attnos_compat(node, varno, \ + sublevels_up, map, map_len, \ + to_rowtype, found_wholerow) \ + map_variable_attnos((node), (varno), \ + (sublevels_up), (map), (map_len), \ + (to_rowtype), (found_wholerow)) +#elif PG_VERSION_NUM >= 90500 +#define map_variable_attnos_compat(node, varno, \ + sublevels_up, map, map_len, \ + to_rowtype, found_wholerow) \ + map_variable_attnos((node), (varno), \ + (sublevels_up), (map), (map_len), \ + (found_wholerow)) +#endif + +#ifndef TupleDescAttr +#define TupleDescAttr(tupdesc, i) 
((tupdesc)->attrs[(i)]) +#endif + + +/* + * RegisterCustomScanMethods() + */ +#if PG_VERSION_NUM < 90600 +#define RegisterCustomScanMethods(methods) +#endif + +/* + * MakeTupleTableSlot() + */ +#if PG_VERSION_NUM >= 120000 +#define MakeTupleTableSlotCompat(tts_ops) \ + MakeTupleTableSlot(NULL, (tts_ops)) +#elif PG_VERSION_NUM >= 110000 +#define MakeTupleTableSlotCompat(tts_ops) \ + MakeTupleTableSlot(NULL) +#else +#define MakeTupleTableSlotCompat(tts_ops) \ + MakeTupleTableSlot() +#endif + +/* + * BackgroundWorkerInitializeConnectionByOid() + */ +#if PG_VERSION_NUM >= 110000 +#define BackgroundWorkerInitializeConnectionByOidCompat(dboid, useroid) \ + BackgroundWorkerInitializeConnectionByOid((dboid), (useroid), 0) +#else +#define BackgroundWorkerInitializeConnectionByOidCompat(dboid, useroid) \ + BackgroundWorkerInitializeConnectionByOid((dboid), (useroid)) +#endif + +/* + * heap_delete() + */ +#if PG_VERSION_NUM >= 110000 +#define heap_delete_compat(relation, tid, cid, crosscheck, \ + wait, hufd, changing_part) \ + heap_delete((relation), (tid), (cid), (crosscheck), \ + (wait), (hufd), (changing_part)) +#else +#define heap_delete_compat(relation, tid, cid, crosscheck, \ + wait, hufd, changing_part) \ + heap_delete((relation), (tid), (cid), (crosscheck), \ + (wait), (hufd)) +#endif + +/* + * compute_parallel_worker() + */ +#if PG_VERSION_NUM >= 110000 +#define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ + compute_parallel_worker((rel), (heap_pages), (index_pages), \ + max_parallel_workers_per_gather) +#elif PG_VERSION_NUM >= 100000 +#define compute_parallel_worker_compat(rel, heap_pages, index_pages) \ + compute_parallel_worker((rel), (heap_pages), (index_pages)) +#endif + + +/* + * generate_gather_paths() + */ +#if PG_VERSION_NUM >= 110000 +#define generate_gather_paths_compat(root, rel) \ + generate_gather_paths((root), (rel), false) +#elif PG_VERSION_NUM >= 90600 +#define generate_gather_paths_compat(root, rel) \ + 
generate_gather_paths((root), (rel)) +#else +#define generate_gather_paths_compat(root, rel) +#endif + + +/* + * find_childrel_appendrelinfo() + */ +#if PG_VERSION_NUM >= 110000 +#define find_childrel_appendrelinfo_compat(root, rel) \ + ((root)->append_rel_array[(rel)->relid]) +#else +#define find_childrel_appendrelinfo_compat(root, rel) \ + find_childrel_appendrelinfo((root), (rel)) +#endif + +/* + * HeapTupleGetXmin() + * Vanilla PostgreSQL has HeaptTupleHeaderGetXmin, but for 64-bit xid + * we need access to entire tuple, not just its header. + */ +#ifdef XID_IS_64BIT +#define HeapTupleGetXminCompat(htup) HeapTupleGetXmin(htup) +#else +#define HeapTupleGetXminCompat(htup) HeapTupleHeaderGetXmin((htup)->t_data) +#endif + +/* + * is_andclause + */ +#if PG_VERSION_NUM >= 120000 +#define is_andclause_compat(clause) is_andclause(clause) +#else +#define is_andclause_compat(clause) and_clause(clause) +#endif + +/* + * GetDefaultTablespace + */ +#if PG_VERSION_NUM >= 120000 +#define GetDefaultTablespaceCompat(relpersistence, partitioned) \ + GetDefaultTablespace((relpersistence), (partitioned)) +#else +#define GetDefaultTablespaceCompat(relpersistence, partitioned) \ + GetDefaultTablespace((relpersistence)) +#endif + +/* + * CreateTemplateTupleDesc + */ +#if PG_VERSION_NUM >= 120000 +#define CreateTemplateTupleDescCompat(natts, hasoid) CreateTemplateTupleDesc(natts) +#else +#define CreateTemplateTupleDescCompat(natts, hasoid) CreateTemplateTupleDesc((natts), (hasoid)) +#endif + +/* + * addRangeTableEntryForRelation + */ +#if PG_VERSION_NUM >= 120000 +#define addRangeTableEntryForRelationCompat(pstate, rel, lockmode, alias, inh, inFromCl) \ + addRangeTableEntryForRelation((pstate), (rel), (lockmode), (alias), (inh), (inFromCl)) +#else +#define addRangeTableEntryForRelationCompat(pstate, rel, lockmode, alias, inh, inFromCl) \ + addRangeTableEntryForRelation((pstate), (rel), (alias), (inh), (inFromCl)) +#endif + +/* + * nextCopyFrom (WITH_OIDS removed) + */ +#if 
PG_VERSION_NUM >= 120000 +#define NextCopyFromCompat(cstate, econtext, values, nulls, tupleOid) \ + NextCopyFrom((cstate), (econtext), (values), (nulls)) +#else +#define NextCopyFromCompat(cstate, econtext, values, nulls, tupleOid) \ + NextCopyFrom((cstate), (econtext), (values), (nulls), (tupleOid)) +#endif + +/* + * ExecInsertIndexTuples. Since 12 slot contains tupleid. + * Since 14: new fields "resultRelInfo", "update". + * Since 16: new bool field "onlySummarizing". + */ +#if PG_VERSION_NUM >= 160000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes), (onlySummarizing)) +#elif PG_VERSION_NUM >= 140000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((resultRelInfo), (slot), (estate), (update), (noDupError), (specConflict), (arbiterIndexes)) +#elif PG_VERSION_NUM >= 120000 +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((slot), (estate), (noDupError), (specConflict), (arbiterIndexes)) +#else +#define ExecInsertIndexTuplesCompat(resultRelInfo, slot, tupleid, estate, update, noDupError, specConflict, arbiterIndexes, onlySummarizing) \ + ExecInsertIndexTuples((slot), (tupleid), (estate), (noDupError), (specConflict), (arbiterIndexes)) +#endif + +/* + * RenameRelationInternal + */ +#if PG_VERSION_NUM >= 120000 +#define RenameRelationInternalCompat(myrelid, newname, is_internal, is_index) \ + RenameRelationInternal((myrelid), (newname), (is_internal), (is_index)) +#else +#define RenameRelationInternalCompat(myrelid, newname, is_internal, is_index) \ + RenameRelationInternal((myrelid), (newname), (is_internal)) +#endif + +/* + * 
getrelid + */ +#if PG_VERSION_NUM >= 120000 +#define getrelid(rangeindex,rangetable) \ + (rt_fetch(rangeindex, rangetable)->relid) +#endif + +/* + * AddRelationNewConstraints + */ +#if PG_VERSION_NUM >= 120000 +#define AddRelationNewConstraintsCompat(rel, newColDefaults, newConstrains, allow_merge, is_local, is_internal) \ + AddRelationNewConstraints((rel), (newColDefaults), (newConstrains), (allow_merge), (is_local), (is_internal), NULL) +#else +#define AddRelationNewConstraintsCompat(rel, newColDefaults, newConstrains, allow_merge, is_local, is_internal) \ + AddRelationNewConstraints((rel), (newColDefaults), (newConstrains), (allow_merge), (is_local), (is_internal)) +#endif + +/* + * [PGPRO-3725] Since 11.7 and 12.1 in pgpro standard and ee PGPRO-2843 + * appeared, changing the signature, wow. There is no numeric pgpro edition + * macro (and never will be, for old versions), so distinguish via macro added + * by the commit. + */ +#if defined(QTW_DONT_COPY_DEFAULT) && (PG_VERSION_NUM < 140000) +#define expression_tree_mutator_compat(node, mutator, context) \ + expression_tree_mutator((node), (mutator), (context), 0) +#else +#define expression_tree_mutator_compat(node, mutator, context) \ + expression_tree_mutator((node), (mutator), (context)) +#endif + +/* + * stringToQualifiedNameList + */ +#if PG_VERSION_NUM >= 160000 +#define stringToQualifiedNameListCompat(string) \ + stringToQualifiedNameList((string), NULL) +#else +#define stringToQualifiedNameListCompat(string) \ + stringToQualifiedNameList((string)) +#endif + /* * ------------- * Common code * ------------- */ +#if PG_VERSION_NUM >= 120000 +#define ExecInitExtraTupleSlotCompat(estate, tdesc, tts_ops) \ + ExecInitExtraTupleSlot((estate), (tdesc), (tts_ops)); +#else +#define ExecInitExtraTupleSlotCompat(estate, tdesc, tts_ops) \ + ExecInitExtraTupleSlotCompatHorse((estate), (tdesc)) +static inline TupleTableSlot * +ExecInitExtraTupleSlotCompatHorse(EState *s, TupleDesc t) +{ +#if PG_VERSION_NUM >= 110000 + 
return ExecInitExtraTupleSlot(s, t); +#else + TupleTableSlot *res = ExecInitExtraTupleSlot(s); + + if (t) + ExecSetSlotDescriptor(res, t); + + return res; +#endif +} +#endif + +/* See ExecEvalParamExtern() */ +static inline ParamExternData * +CustomEvalParamExternCompat(Param *param, + ParamListInfo params, + ParamExternData *prmdata) +{ + ParamExternData *prm; + +#if PG_VERSION_NUM >= 110000 + if (params->paramFetch != NULL) + prm = params->paramFetch(params, param->paramid, false, prmdata); + else + prm = ¶ms->params[param->paramid - 1]; +#else + prm = ¶ms->params[param->paramid - 1]; + + if (!OidIsValid(prm->ptype) && params->paramFetch != NULL) + params->paramFetch(params, param->paramid); +#endif + + return prm; +} -void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); +void set_append_rel_size_compat(PlannerInfo *root, RelOptInfo *rel, Index rti); +/* + * lnext() + * In >=13 list implementation was reworked (1cff1b95ab6) + */ +#if PG_VERSION_NUM >= 130000 +#define lnext_compat(l, lc) lnext((l), (lc)) +#else +#define lnext_compat(l, lc) lnext((lc)) +#endif + +/* + * heap_open() + * heap_openrv() + * heap_close() + * In >=13 heap_* was replaced with table_* (e0c4ec07284) + */ +#if PG_VERSION_NUM >= 130000 +#define heap_open_compat(r, l) table_open((r), (l)) +#define heap_openrv_compat(r, l) table_openrv((r), (l)) +#define heap_close_compat(r, l) table_close((r), (l)) +#else +#define heap_open_compat(r, l) heap_open((r), (l)) +#define heap_openrv_compat(r, l) heap_openrv((r), (l)) +#define heap_close_compat(r, l) heap_close((r), (l)) +#endif + +/* + * convert_tuples_by_name() + * In >=13 msg parameter in convert_tuples_by_name function was removed (fe66125974c) + */ +#if PG_VERSION_NUM >= 130000 +#define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o)) +#else +#define convert_tuples_by_name_compat(i, o, m) convert_tuples_by_name((i), (o), (m)) +#endif + +/* + * raw_parser() + * In 14 new argument was added 
(844fe9f159a) + */ +#if PG_VERSION_NUM >= 140000 +#define raw_parser_compat(s) raw_parser((s), RAW_PARSE_DEFAULT) +#else +#define raw_parser_compat(s) raw_parser(s) +#endif + +/* + * make_restrictinfo() + * In >=16 4th, 5th and 9th arguments were added (991a3df227e) + * In >=16 3th and 9th arguments were removed (b448f1c8d83) + * In >=14 new argument was added (55dc86eca70) + */ +#if PG_VERSION_NUM >= 160000 +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), false, false, (p), (sl), (rr), NULL, (or)) +#else +#if PG_VERSION_NUM >= 140000 +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((r), (c), (ipd), (od), (p), (sl), (rr), (or), (nr)) +#else +#define make_restrictinfo_compat(r, c, ipd, od, p, sl, rr, or, nr) make_restrictinfo((c), (ipd), (od), (p), (sl), (rr), (or), (nr)) +#endif /* #if PG_VERSION_NUM >= 140000 */ +#endif /* #if PG_VERSION_NUM >= 160000 */ + +/* + * pull_varnos() + * In >=14 new argument was added (55dc86eca70) + */ +#if PG_VERSION_NUM >= 140000 +#define pull_varnos_compat(r, n) pull_varnos((r), (n)) +#else +#define pull_varnos_compat(r, n) pull_varnos(n) +#endif + +/* + * build_expression_pathkey() + * In >=16 argument was removed (b448f1c8d83) + */ +#if PG_VERSION_NUM >= 160000 +#define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, opno, rel, create_it) +#else +#define build_expression_pathkey_compat(root, expr, nullable_relids, opno, rel, create_it) build_expression_pathkey(root, expr, nullable_relids, opno, rel, create_it) +#endif + +/* + * EvalPlanQualInit() + * In >=16 argument was added (70b42f27902) + */ +#if PG_VERSION_NUM >= 160000 +#define EvalPlanQualInit_compat(epqstate, parentestate, subplan, auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam, NIL) +#else +#define EvalPlanQualInit_compat(epqstate, parentestate, subplan, 
auxrowmarks, epqParam) EvalPlanQualInit(epqstate, parentestate, subplan, auxrowmarks, epqParam) +#endif -#endif /* PG_COMPAT_H */ +#endif /* PG_COMPAT_H */ diff --git a/src/include/compat/relation_tags.h b/src/include/compat/relation_tags.h deleted file mode 100644 index cbd80b82..00000000 --- a/src/include/compat/relation_tags.h +++ /dev/null @@ -1,72 +0,0 @@ -/* ------------------------------------------------------------------------ - * - * relation_tags.h - * Attach custom (Key, Value) pairs to an arbitrary RangeTblEntry - * NOTE: implementations for vanilla and PostgresPro differ - * - * Copyright (c) 2017, Postgres Professional - * - * ------------------------------------------------------------------------ - */ - -#ifndef RELATION_TAGS_H -#define RELATION_TAGS_H - -#include "compat/debug_compat_features.h" - -#include "postgres.h" -#include "nodes/relation.h" -#include "nodes/value.h" -#include "utils/memutils.h" - - -/* Does RTE contain 'custom_tags' list? */ -/* TODO: fix this definition once PgPro contains 'relation_tags' patch */ -#if defined(ENABLE_PGPRO_PATCHES) && \ - defined(ENABLE_RELATION_TAGS) /* && ... 
*/ -#define NATIVE_RELATION_TAGS -#endif - -/* Memory context we're going to use for tags */ -#define RELATION_TAG_MCXT TopTransactionContext - - -/* Safe TAG constructor (Integer) */ -static inline List * -make_rte_tag_int(char *key, int value) -{ - List *kvp; - MemoryContext old_mcxt; - - /* Allocate TAG in a persistent memory context */ - old_mcxt = MemoryContextSwitchTo(RELATION_TAG_MCXT); - kvp = list_make2(makeString(key), makeInteger(value)); - MemoryContextSwitchTo(old_mcxt); - - return kvp; -} - - -List *rte_fetch_tag(const uint32 query_id, - const RangeTblEntry *rte, - const char *key); - -List *rte_attach_tag(const uint32 query_id, - RangeTblEntry *rte, - List *key_value_pair); - - -List *relation_tags_search(List *custom_tags, - const char *key); - -void rte_deconstruct_tag(const List *key_value_pair, - const char **key, - const Value **value); - - -void incr_refcount_relation_tags(void); -uint32 get_refcount_relation_tags(void); -void decr_refcount_relation_tags(void); - - -#endif /* RELATION_TAGS_H */ diff --git a/src/include/compat/rowmarks_fix.h b/src/include/compat/rowmarks_fix.h index 4e441388..c94504c3 100644 --- a/src/include/compat/rowmarks_fix.h +++ b/src/include/compat/rowmarks_fix.h @@ -13,31 +13,49 @@ #define ROWMARKS_FIX_H #include "compat/debug_compat_features.h" -#include "compat/expand_rte_hook.h" #include "postgres.h" #include "nodes/parsenodes.h" #include "nodes/plannodes.h" +#if PG_VERSION_NUM < 120000 +#include "nodes/relation.h" +#else +#include "optimizer/optimizer.h" +#endif + + +#if PG_VERSION_NUM >= 90600 +void append_tle_for_rowmark(PlannerInfo *root, PlanRowMark *rc); + +#else /* - * If PostgreSQL supports 'expand_inherited_rtentry_hook', - * our hacks are completely unnecessary. + * Starting from 9.6, it's possible to append junk + * tableoid columns using the PlannerInfo->processed_tlist. 
+ * This is absolutely crucial for UPDATE and DELETE queries, + * so we had to add some special fixes for 9.5: + * + * 1) disable dangerous UPDATE & DELETE optimizations. + * 2) disable optimizations for SELECT .. FOR UPDATE etc. */ -#if defined(ENABLE_PGPRO_PATCHES) && \ - defined(ENABLE_ROWMARKS_FIX) && \ - defined(NATIVE_EXPAND_RTE_HOOK /* dependency */ ) -#define NATIVE_PARTITIONING_ROWMARKS -#endif +#define LEGACY_ROWMARKS_95 + +#define append_tle_for_rowmark(root, rc) ( (void) true ) +#endif -#ifdef NATIVE_PARTITIONING_ROWMARKS -#define postprocess_lock_rows(rtable, plan) ( (void) true ) -#define rowmark_add_tableoids(parse) ( (void) true ) +/* + * add_vars_to_targetlist() + * In >=16 last argument was removed (b3ff6c742f6c) + */ +#if PG_VERSION_NUM >= 160000 +#define add_vars_to_targetlist_compat(root, vars, where_needed) \ + add_vars_to_targetlist((root), (vars), (where_needed)); #else -void postprocess_lock_rows(List *rtable, Plan *plan); -void rowmark_add_tableoids(Query *parse); -#endif /* NATIVE_PARTITIONING_ROWMARKS */ +#define add_vars_to_targetlist_compat(root, vars, where_needed) \ + add_vars_to_targetlist((root), (vars), (where_needed), true); +#endif #endif /* ROWMARKS_FIX_H */ diff --git a/src/include/declarative.h b/src/include/declarative.h new file mode 100644 index 00000000..ee4ea40b --- /dev/null +++ b/src/include/declarative.h @@ -0,0 +1,16 @@ +#ifndef DECLARATIVE_H +#define DECLARATIVE_H + +#include "postgres.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" + +void modify_declarative_partitioning_query(Query *query); +bool is_pathman_related_partitioning_cmd(Node *parsetree, Oid *parent_relid); + +/* actual actions */ +void handle_attach_partition(Oid parent_relid, AlterTableCmd *cmd); +void handle_detach_partition(AlterTableCmd *cmd); +void handle_create_partition_of(Oid parent_relid, CreateStmt *stmt); + +#endif /* DECLARATIVE_H */ diff --git a/src/include/hooks.h b/src/include/hooks.h index 6a312db3..4d426f5a 100644 --- 
a/src/include/hooks.h +++ b/src/include/hooks.h @@ -3,7 +3,7 @@ * hooks.h * prototypes of rel_pathlist and join_pathlist hooks * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -13,6 +13,7 @@ #include "postgres.h" +#include "executor/executor.h" #include "optimizer/planner.h" #include "optimizer/paths.h" #include "parser/analyze.h" @@ -20,12 +21,14 @@ #include "tcop/utility.h" -extern set_join_pathlist_hook_type set_join_pathlist_next; -extern set_rel_pathlist_hook_type set_rel_pathlist_hook_next; -extern planner_hook_type planner_hook_next; -extern post_parse_analyze_hook_type post_parse_analyze_hook_next; -extern shmem_startup_hook_type shmem_startup_hook_next; -extern ProcessUtility_hook_type process_utility_hook_next; +extern set_join_pathlist_hook_type pathman_set_join_pathlist_next; +extern set_rel_pathlist_hook_type pathman_set_rel_pathlist_hook_next; +extern planner_hook_type pathman_planner_hook_next; +extern post_parse_analyze_hook_type pathman_post_parse_analyze_hook_next; +extern shmem_startup_hook_type pathman_shmem_startup_hook_next; +extern ProcessUtility_hook_type pathman_process_utility_hook_next; +extern ExecutorRun_hook_type pathman_executor_run_hook_next; +extern ExecutorStart_hook_type pathman_executor_start_hook_prev; void pathman_join_pathlist_hook(PlannerInfo *root, @@ -40,20 +43,47 @@ void pathman_rel_pathlist_hook(PlannerInfo *root, Index rti, RangeTblEntry *rte); -void pathman_enable_assign_hook(char newval, void *extra); +void pathman_enable_assign_hook(bool newval, void *extra); +bool pathman_enable_check_hook(bool *newval, void **extra, GucSource source); PlannedStmt * pathman_planner_hook(Query *parse, +#if PG_VERSION_NUM >= 130000 + const char *query_string, +#endif int cursorOptions, ParamListInfo boundParams); -void pathman_post_parse_analysis_hook(ParseState *pstate, +#if PG_VERSION_NUM >= 140000 
+void pathman_post_parse_analyze_hook(ParseState *pstate, + Query *query, + JumbleState *jstate); +#else +void pathman_post_parse_analyze_hook(ParseState *pstate, Query *query); +#endif void pathman_shmem_startup_hook(void); void pathman_relcache_hook(Datum arg, Oid relid); -#if PG_VERSION_NUM >= 100000 +#if PG_VERSION_NUM >= 140000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc); +#elif PG_VERSION_NUM >= 130000 +void pathman_process_utility_hook(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc); +#elif PG_VERSION_NUM >= 100000 void pathman_process_utility_hook(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, @@ -70,5 +100,23 @@ void pathman_process_utility_hook(Node *parsetree, char *completionTag); #endif +#if PG_VERSION_NUM >= 90600 +typedef uint64 ExecutorRun_CountArgType; +#else +typedef long ExecutorRun_CountArgType; +#endif + +#if PG_VERSION_NUM >= 100000 +void pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, + ExecutorRun_CountArgType count, + bool execute_once); +#else +void pathman_executor_hook(QueryDesc *queryDesc, + ScanDirection direction, + ExecutorRun_CountArgType count); +#endif +void pathman_executor_start_hook(QueryDesc *queryDescc, + int eflags); #endif /* PATHMAN_HOOKS_H */ diff --git a/src/include/init.h b/src/include/init.h index c1a1041c..58335c46 100644 --- a/src/include/init.h +++ b/src/include/init.h @@ -46,21 +46,21 @@ typedef struct do { \ Assert(CurrentMemoryContext != TopMemoryContext); \ Assert(CurrentMemoryContext != TopPathmanContext); \ - Assert(CurrentMemoryContext != PathmanRelationCacheContext); \ - Assert(CurrentMemoryContext != PathmanParentCacheContext); \ - 
Assert(CurrentMemoryContext != PathmanBoundCacheContext); \ + Assert(CurrentMemoryContext != PathmanParentsCacheContext); \ + Assert(CurrentMemoryContext != PathmanStatusCacheContext); \ + Assert(CurrentMemoryContext != PathmanBoundsCacheContext); \ } while (0) #define PATHMAN_MCXT_COUNT 4 extern MemoryContext TopPathmanContext; -extern MemoryContext PathmanRelationCacheContext; -extern MemoryContext PathmanParentCacheContext; -extern MemoryContext PathmanBoundCacheContext; +extern MemoryContext PathmanParentsCacheContext; +extern MemoryContext PathmanStatusCacheContext; +extern MemoryContext PathmanBoundsCacheContext; -extern HTAB *partitioned_rels; -extern HTAB *parent_cache; -extern HTAB *bound_cache; +extern HTAB *parents_cache; +extern HTAB *status_cache; +extern HTAB *bounds_cache; /* pg_pathman's initialization state */ extern PathmanInitState pathman_init_state; @@ -69,28 +69,31 @@ extern PathmanInitState pathman_init_state; extern bool pathman_hooks_enabled; +#define PATHMAN_TOP_CONTEXT "maintenance" +#define PATHMAN_PARENTS_CACHE "partition parents cache" +#define PATHMAN_STATUS_CACHE "partition status cache" +#define PATHMAN_BOUNDS_CACHE "partition bounds cache" + + /* Transform pg_pathman's memory context into simple name */ static inline const char * -simpify_mcxt_name(MemoryContext mcxt) +simplify_mcxt_name(MemoryContext mcxt) { - static const char *top_mcxt = "maintenance", - *rel_mcxt = "partition dispatch cache", - *parent_mcxt = "partition parents cache", - *bound_mcxt = "partition bounds cache"; - if (mcxt == TopPathmanContext) - return top_mcxt; + return PATHMAN_TOP_CONTEXT; - else if (mcxt == PathmanRelationCacheContext) - return rel_mcxt; + else if (mcxt == PathmanParentsCacheContext) + return PATHMAN_PARENTS_CACHE; - else if (mcxt == PathmanParentCacheContext) - return parent_mcxt; + else if (mcxt == PathmanStatusCacheContext) + return PATHMAN_STATUS_CACHE; - else if (mcxt == PathmanBoundCacheContext) - return bound_mcxt; + else if (mcxt == 
PathmanBoundsCacheContext) + return PATHMAN_BOUNDS_CACHE; - else elog(ERROR, "error in function " CppAsString(simpify_mcxt_name)); + else elog(ERROR, "unknown memory context"); + + return NULL; /* keep compiler quiet */ } @@ -138,7 +141,7 @@ simpify_mcxt_name(MemoryContext mcxt) pathman_init_state.pg_pathman_enable = false; \ pathman_init_state.auto_partition = false; \ pathman_init_state.override_copy = false; \ - pathman_init_state.initialization_needed = true; \ + unload_config(); \ } while (0) @@ -153,11 +156,11 @@ simpify_mcxt_name(MemoryContext mcxt) #define DEFAULT_PATHMAN_OVERRIDE_COPY true -/* Lowest version of Pl/PgSQL frontend compatible with internals (0xAA_BB_CC) */ -#define LOWEST_COMPATIBLE_FRONT 0x010400 +/* Lowest version of Pl/PgSQL frontend compatible with internals */ +#define LOWEST_COMPATIBLE_FRONT "1.5.0" -/* Current version of native C library (0xAA_BB_CC) */ -#define CURRENT_LIB_VERSION 0x010400 +/* Current version of native C library */ +#define CURRENT_LIB_VERSION "1.5.12" void *pathman_cache_search_relid(HTAB *cache_table, @@ -168,8 +171,8 @@ void *pathman_cache_search_relid(HTAB *cache_table, /* * Save and restore PathmanInitState. */ -void save_pathman_init_state(PathmanInitState *temp_init_state); -void restore_pathman_init_state(const PathmanInitState *temp_init_state); +void save_pathman_init_state(volatile PathmanInitState *temp_init_state); +void restore_pathman_init_state(const volatile PathmanInitState *temp_init_state); /* * Create main GUC variables. 
@@ -201,12 +204,12 @@ find_children_status find_inheritance_children_array(Oid parentrelId, char *build_check_constraint_name_relid_internal(Oid relid); char *build_check_constraint_name_relname_internal(const char *relname); -char *build_sequence_name_internal(Oid relid); +char *build_sequence_name_relid_internal(Oid relid); +char *build_sequence_name_relname_internal(const char *relname); char *build_update_trigger_name_internal(Oid relid); char *build_update_trigger_func_name_internal(Oid relid); - bool pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, @@ -225,8 +228,6 @@ bool read_pathman_params(Oid relid, Datum *values, bool *isnull); -Oid *read_parent_oids(int *nelems); - bool validate_range_constraint(const Expr *expr, const PartRelationInfo *prel, diff --git a/src/include/partition_creation.h b/src/include/partition_creation.h index 4a93bbfe..cc666923 100644 --- a/src/include/partition_creation.h +++ b/src/include/partition_creation.h @@ -24,8 +24,7 @@ /* Create RANGE partitions to store some value */ Oid create_partitions_for_value(Oid relid, Datum value, Oid value_type); -Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, - bool is_background_worker); +Oid create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type); /* Create one RANGE partition */ @@ -81,13 +80,6 @@ void drop_pathman_check_constraint(Oid relid); void add_pathman_check_constraint(Oid relid, Constraint *constraint); -/* Update triggers */ -void create_single_update_trigger_internal(Oid partition_relid, - const char *trigname, - List *columns); - -bool has_update_trigger_internal(Oid parent); - /* Partitioning callback type */ typedef enum { diff --git a/src/include/partition_filter.h b/src/include/partition_filter.h index cccacf2f..4aae0bbb 100644 --- a/src/include/partition_filter.h +++ b/src/include/partition_filter.h @@ -3,7 +3,7 @@ * partition_filter.h * Select partition for INSERT operation * - * Copyright (c) 2016, 
Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -25,11 +25,19 @@ #endif +#define INSERT_NODE_NAME "PartitionFilter" + + #define ERR_PART_ATTR_NULL "partitioning expression's value should not be NULL" -#define ERR_PART_ATTR_MULTIPLE_RESULTS "partitioning expression should return single value" #define ERR_PART_ATTR_NO_PART "no suitable partition for key '%s'" -#define ERR_PART_ATTR_MULTIPLE "PartitionFilter selected more than one partition" +#define ERR_PART_ATTR_MULTIPLE INSERT_NODE_NAME " selected more than one partition" +#if PG_VERSION_NUM < 130000 +/* + * In >=13 msg parameter in convert_tuples_by_name function was removed (fe66125974c) + * and ERR_PART_DESC_CONVERT become unusable + */ #define ERR_PART_DESC_CONVERT "could not convert row type for partition" +#endif /* @@ -37,49 +45,66 @@ */ typedef struct { - Oid partid; /* partition's relid */ - ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ - TupleConversionMap *tuple_map; /* tuple conversion map (parent => child) */ + Oid partid; /* partition's relid */ + ResultRelInfo *result_rel_info; /* cached ResultRelInfo */ + TupleConversionMap *tuple_map; /* tuple mapping (parent => child) */ + TupleConversionMap *tuple_map_child; /* tuple mapping (child => child), for exclude 'ctid' */ + + PartRelationInfo *prel; /* this child might be a parent... */ + ExprState *prel_expr_state; /* and have its own part. 
expression */ } ResultRelInfoHolder; -/* Forward declaration (for on_new_rri_holder()) */ +/* Default settings for ResultPartsStorage */ +#define RPS_DEFAULT_SPECULATIVE false /* speculative inserts */ +#define RPS_CLOSE_RELATIONS true +#define RPS_SKIP_RELATIONS false + +/* Neat wrapper for readability */ +#define RPS_RRI_CB(cb, args) (cb), ((void *) args) + + +/* Forward declaration (for on_rri_holder()) */ struct ResultPartsStorage; typedef struct ResultPartsStorage ResultPartsStorage; /* - * Callback to be fired at rri_holder creation. + * Callback to be fired at rri_holder creation/destruction. */ -typedef void (*on_new_rri_holder)(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); +typedef void (*rri_holder_cb)(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); /* * Cached ResultRelInfos of partitions. */ struct ResultPartsStorage { - ResultRelInfo *saved_rel_info; /* original ResultRelInfo (parent) */ + ResultRelInfo *base_rri; /* original ResultRelInfo */ + EState *estate; /* pointer to executor's state */ + CmdType command_type; /* INSERT | UPDATE */ + + /* partition relid -> ResultRelInfoHolder */ HTAB *result_rels_table; HASHCTL result_rels_table_config; bool speculative_inserts; /* for ExecOpenIndices() */ - on_new_rri_holder on_new_rri_holder_callback; - void *callback_arg; + rri_holder_cb init_rri_holder_cb; + void *init_rri_holder_cb_arg; - EState *estate; /* pointer to executor's state */ + rri_holder_cb fini_rri_holder_cb; + void *fini_rri_holder_cb_arg; - CmdType command_type; /* currenly we only allow INSERT */ + bool close_relations; LOCKMODE head_open_lock_mode; - LOCKMODE heap_close_lock_mode; -}; -/* - * Standard size of ResultPartsStorage entry. 
- */ -#define ResultPartsStorageStandard 0 + PartRelationInfo *prel; + ExprState *prel_expr_state; + ExprContext *prel_econtext; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + ResultRelInfo *init_rri; /* first initialized ResultRelInfo */ +#endif +}; typedef struct { @@ -91,13 +116,14 @@ typedef struct Plan *subplan; /* proxy variable to store subplan */ ResultPartsStorage result_parts; /* partition ResultRelInfo cache */ - - bool warning_triggered; /* warning message counter */ + CmdType command_type; TupleTableSlot *tup_convert_slot; /* slot for rebuilt tuples */ - ExprContext *tup_convert_econtext; /* ExprContext for projections */ - ExprState *expr_state; /* for partitioning expression */ +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + Index parent_rti; /* Parent RT index for use of EXPLAIN, + see "ModifyTable::nominalRelation" */ +#endif } PartitionFilterState; @@ -108,40 +134,74 @@ extern CustomScanMethods partition_filter_plan_methods; extern CustomExecMethods partition_filter_exec_methods; +#define IsPartitionFilterPlan(node) \ + ( \ + IsA((node), CustomScan) && \ + (((CustomScan *) (node))->methods == &partition_filter_plan_methods) \ + ) + +#define IsPartitionFilterState(node) \ + ( \ + IsA((node), CustomScanState) && \ + (((CustomScanState *) (node))->methods == &partition_filter_exec_methods) \ + ) + +#define IsPartitionFilter(node) \ + ( IsPartitionFilterPlan(node) || IsPartitionFilterState(node) ) + + + void init_partition_filter_static_data(void); -/* ResultPartsStorage init\fini\scan function */ +/* + * ResultPartsStorage API (select partition for INSERT & UPDATE). 
+ */ + +/* Initialize storage for some parent table */ void init_result_parts_storage(ResultPartsStorage *parts_storage, + Oid parent_relid, + ResultRelInfo *current_rri, EState *estate, + CmdType cmd_type, + bool close_relations, bool speculative_inserts, - Size table_entry_size, - on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg); + rri_holder_cb init_rri_holder_cb, + void *init_rri_holder_cb_arg, + rri_holder_cb fini_rri_holder_cb, + void *fini_rri_holder_cb_arg); + +/* Free storage and opened relations */ +void fini_result_parts_storage(ResultPartsStorage *parts_storage); -void fini_result_parts_storage(ResultPartsStorage *parts_storage, - bool close_rels); +/* Find ResultRelInfo holder in storage */ +ResultRelInfoHolder * scan_result_parts_storage(EState *estate, ResultPartsStorage *storage, Oid partid); -ResultRelInfoHolder * scan_result_parts_storage(Oid partid, - ResultPartsStorage *storage); +/* Refresh PartRelationInfo in storage */ +PartRelationInfo * refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid); TupleConversionMap * build_part_tuple_map(Relation parent_rel, Relation child_rel); +TupleConversionMap * build_part_tuple_map_child(Relation child_rel); + +void destroy_tuple_map(TupleConversionMap *tuple_map); + +List * pfilter_build_tlist(Plan *subplan, Index varno); /* Find suitable partition using 'value' */ Oid * find_partitions_for_value(Datum value, Oid value_type, const PartRelationInfo *prel, int *nparts); -ResultRelInfoHolder * select_partition_for_insert(Datum value, Oid value_type, - const PartRelationInfo *prel, - ResultPartsStorage *parts_storage, - EState *estate); - +ResultRelInfoHolder *select_partition_for_insert(EState *estate, + ResultPartsStorage *parts_storage, + TupleTableSlot *slot); Plan * make_partition_filter(Plan *subplan, Oid parent_relid, + Index parent_rti, OnConflictAction conflict_action, + CmdType command_type, List *returning_list); diff --git 
a/src/include/partition_overseer.h b/src/include/partition_overseer.h new file mode 100644 index 00000000..ddf84c7a --- /dev/null +++ b/src/include/partition_overseer.h @@ -0,0 +1,54 @@ +/* ------------------------------------------------------------------------ + * + * partition_overseer.h + * Restart ModifyTable for unobvious reasons + * + * Copyright (c) 2018, Postgres Professional + * + * ------------------------------------------------------------------------ + */ + +#ifndef PARTITION_OVERSEER_H +#define PARTITION_OVERSEER_H + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "access/tupconvert.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + + +#define OVERSEER_NODE_NAME "PartitionOverseer" + + +extern CustomScanMethods partition_overseer_plan_methods; +extern CustomExecMethods partition_overseer_exec_methods; + + +void init_partition_overseer_static_data(void); +Plan *make_partition_overseer(Plan *subplan); + +Node *partition_overseer_create_scan_state(CustomScan *node); + +void partition_overseer_begin(CustomScanState *node, + EState *estate, + int eflags); + +TupleTableSlot *partition_overseer_exec(CustomScanState *node); + +void partition_overseer_end(CustomScanState *node); + +void partition_overseer_rescan(CustomScanState *node); + +void partition_overseer_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); + + +#endif /* PARTITION_OVERSEER_H */ diff --git a/src/include/partition_router.h b/src/include/partition_router.h new file mode 100644 index 00000000..d5684eba --- /dev/null +++ b/src/include/partition_router.h @@ -0,0 +1,85 @@ +/* ------------------------------------------------------------------------ + * + * partition_update.h + * Insert row to right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * + * 
------------------------------------------------------------------------ + */ + +#ifndef PARTITION_UPDATE_H +#define PARTITION_UPDATE_H + +#include "relation_info.h" +#include "utils.h" + +#include "postgres.h" +#include "commands/explain.h" +#include "optimizer/planner.h" + +#if PG_VERSION_NUM >= 90600 +#include "nodes/extensible.h" +#endif + + +#define UPDATE_NODE_NAME "PartitionRouter" + + +typedef struct PartitionRouterState +{ + CustomScanState css; + + Plan *subplan; /* proxy variable to store subplan */ + ExprState *constraint; /* should tuple remain in partition? */ +#if PG_VERSION_NUM < 140000 /* field removed in 86dc90056dfd */ + JunkFilter *junkfilter; /* 'ctid' extraction facility */ +#endif + ResultRelInfo *current_rri; + + /* Machinery required for EvalPlanQual */ + EPQState epqstate; + int epqparam; + + /* Preserved slot from last call */ + bool yielded; + TupleTableSlot *yielded_slot; +#if PG_VERSION_NUM >= 140000 + TupleTableSlot *yielded_original_slot; +#endif + + /* Need these for a GREAT deal of hackery */ + ModifyTableState *mt_state; + bool update_stmt_triggers, + insert_stmt_triggers; +} PartitionRouterState; + + +extern bool pg_pathman_enable_partition_router; + +extern CustomScanMethods partition_router_plan_methods; +extern CustomExecMethods partition_router_exec_methods; + + +#define IsPartitionRouterState(node) \ + ( \ + IsA((node), CustomScanState) && \ + (((CustomScanState *) (node))->methods == &partition_router_exec_methods) \ + ) + +/* Highlight hacks with ModifyTable's fields */ +#define MTHackField(mt_state, field) ( (mt_state)->field ) + +void init_partition_router_static_data(void); +void partition_router_begin(CustomScanState *node, EState *estate, int eflags); +void partition_router_end(CustomScanState *node); +void partition_router_rescan(CustomScanState *node); +void partition_router_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); + +Plan *make_partition_router(Plan *subplan, int epq_param, Index 
parent_rti); +Node *partition_router_create_scan_state(CustomScan *node); +TupleTableSlot *partition_router_exec(CustomScanState *node); + +#endif /* PARTITION_UPDATE_H */ diff --git a/src/include/pathman.h b/src/include/pathman.h index 9bcd26f0..28f6ef30 100644 --- a/src/include/pathman.h +++ b/src/include/pathman.h @@ -45,12 +45,11 @@ * Definitions for the "pathman_config" table. */ #define PATHMAN_CONFIG "pathman_config" -#define Natts_pathman_config 5 +#define Natts_pathman_config 4 #define Anum_pathman_config_partrel 1 /* partitioned relation (regclass) */ #define Anum_pathman_config_expr 2 /* partition expression (original) */ #define Anum_pathman_config_parttype 3 /* partitioning type (1|2) */ #define Anum_pathman_config_range_interval 4 /* interval for RANGE pt. (text) */ -#define Anum_pathman_config_cooked_expr 5 /* parsed partitioning expression (text) */ /* type modifier (typmod) for 'range_interval' */ #define PATHMAN_CONFIG_interval_typmod -1 @@ -100,13 +99,18 @@ extern Oid pathman_config_params_relid; */ Oid get_pathman_config_relid(bool invalid_is_ok); Oid get_pathman_config_params_relid(bool invalid_is_ok); +Oid get_pathman_schema(void); /* * Create RelOptInfo & RTE for a selected partition. 
*/ -Index append_child_relation(PlannerInfo *root, Relation parent_relation, - Index parent_rti, int ir_index, Oid child_oid, +Index append_child_relation(PlannerInfo *root, + Relation parent_relation, + PlanRowMark *parent_rowmark, + Index parent_rti, + int ir_index, + Oid child_oid, List *wrappers); @@ -114,7 +118,8 @@ Index append_child_relation(PlannerInfo *root, Relation parent_relation, * Copied from PostgreSQL (prepunion.c) */ void make_inh_translation_list(Relation oldrelation, Relation newrelation, - Index newvarno, List **translated_vars); + Index newvarno, List **translated_vars, + AppendRelInfo *appinfo); Bitmapset *translate_col_privs(const Bitmapset *parent_privs, List *translated_vars); diff --git a/src/include/pathman_workers.h b/src/include/pathman_workers.h index 25ab5e1d..be4d6425 100644 --- a/src/include/pathman_workers.h +++ b/src/include/pathman_workers.h @@ -74,7 +74,7 @@ typedef struct pid_t pid; /* worker's PID */ Oid dbid; /* database which contains the relation */ Oid relid; /* table to be partitioned concurrently */ - uint64 total_rows; /* total amount of rows processed */ + int64 total_rows; /* total amount of rows processed */ int32 batch_size; /* number of rows in a batch */ float8 sleep_time; /* how long should we sleep in case of error? 
*/ @@ -112,10 +112,29 @@ cps_set_status(ConcurrentPartSlot *slot, ConcurrentPartSlotStatus status) SpinLockRelease(&slot->mutex); } +static inline const char * +cps_print_status(ConcurrentPartSlotStatus status) +{ + switch(status) + { + case CPS_FREE: + return "free"; + + case CPS_WORKING: + return "working"; + + case CPS_STOPPING: + return "stopping"; + + default: + return "[unknown]"; + } +} + /* Number of worker slots for concurrent partitioning */ -#define PART_WORKER_SLOTS 10 +#define PART_WORKER_SLOTS max_worker_processes /* Max number of attempts per batch */ #define PART_WORKER_MAX_ATTEMPTS 60 diff --git a/src/include/planner_tree_modification.h b/src/include/planner_tree_modification.h index eee1ea76..edca73a0 100644 --- a/src/include/planner_tree_modification.h +++ b/src/include/planner_tree_modification.h @@ -16,7 +16,7 @@ #include "postgres.h" #include "utils/rel.h" -#include "nodes/relation.h" +/* #include "nodes/relation.h" */ #include "nodes/nodeFuncs.h" @@ -25,15 +25,22 @@ void assign_query_id(Query *query); void reset_query_id_generator(void); /* Plan tree rewriting utility */ -void plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context); +Plan * plan_tree_visitor(Plan *plan, + Plan *(*visitor) (Plan *plan, void *context), + void *context); -/* Query tree rewriting utility */ +/* PlanState tree rewriting utility */ +void state_tree_visitor(PlanState *state, + void (*visitor) (PlanState *state, void *context), + void *context); + +/* Query tree rewriting utilities */ void pathman_transform_query(Query *parse, ParamListInfo params); +void pathman_post_analyze_query(Query *parse); /* These functions scribble on Plan tree */ -void add_partition_filters(List *rtable, Plan *plan); +Plan *add_partition_filters(List *rtable, Plan *plan); +Plan *add_partition_routers(List *rtable, Plan *plan); /* used by assign_rel_parenthood_status() etc */ @@ -44,12 +51,16 @@ typedef enum PARENTHOOD_ALLOWED /* children are enabled 
(default) */ } rel_parenthood_status; -void assign_rel_parenthood_status(uint32 query_id, - RangeTblEntry *rte, +void assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status); -rel_parenthood_status get_rel_parenthood_status(uint32 query_id, - RangeTblEntry *rte); +rel_parenthood_status get_rel_parenthood_status(RangeTblEntry *rte); + + +/* used to determine nested planner() calls */ +void incr_planner_calls_count(void); +void decr_planner_calls_count(void); +int32 get_planner_calls_count(void); #endif /* PLANNER_TREE_MODIFICATION_H */ diff --git a/src/include/rangeset.h b/src/include/rangeset.h index 96d6bc21..39db6a53 100644 --- a/src/include/rangeset.h +++ b/src/include/rangeset.h @@ -1,7 +1,6 @@ /* ------------------------------------------------------------------------ * * rangeset.h - * IndexRange functions * * Copyright (c) 2015-2016, Postgres Professional * @@ -17,7 +16,10 @@ /* - * IndexRange contains a set of selected partitions. + * IndexRange is essentially a segment [lower; upper]. This module provides + * functions for efficient working (intersection, union) with Lists of + * IndexRange's; this is used for quick selection of partitions. Numbers are + * indexes of partitions in PartRelationInfo's children. */ typedef struct { /* lossy == should we use quals? 
*/ diff --git a/src/include/relation_info.h b/src/include/relation_info.h index cbc16b6e..a42bf727 100644 --- a/src/include/relation_info.h +++ b/src/include/relation_info.h @@ -3,7 +3,7 @@ * relation_info.h * Data structures describing partitioned relations * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -11,8 +11,10 @@ #ifndef RELATION_INFO_H #define RELATION_INFO_H +#include "compat/pg_compat.h" + +#include "utils.h" -#include "postgres.h" #include "access/attnum.h" #include "access/sysattr.h" #include "fmgr.h" @@ -26,6 +28,13 @@ #include "storage/lock.h" #include "utils/datum.h" #include "utils/lsyscache.h" +#include "utils/relcache.h" + + +#ifdef USE_ASSERT_CHECKING +#define USE_RELINFO_LOGGING +#define USE_RELINFO_LEAK_TRACKER +#endif /* Range bound */ @@ -88,6 +97,14 @@ FreeBound(Bound *bound, bool byval) pfree(DatumGetPointer(BoundGetValue(bound))); } +static inline char * +BoundToCString(const Bound *bound, Oid value_type) +{ + return IsInfinite(bound) ? + pstrdup("NULL") : + datum_to_cstring(bound->value, value_type); +} + static inline int cmp_bounds(FmgrInfo *cmp_func, const Oid collid, @@ -109,9 +126,7 @@ cmp_bounds(FmgrInfo *cmp_func, } -/* - * Partitioning type. - */ +/* Partitioning type */ typedef enum { PT_ANY = 0, /* for part type traits (virtual type) */ @@ -119,9 +134,7 @@ typedef enum PT_RANGE } PartType; -/* - * Child relation info for RANGE partitioning. - */ +/* Child relation info for RANGE partitioning */ typedef struct { Oid child_oid; @@ -130,54 +143,25 @@ typedef struct } RangeEntry; /* - * PartRelationInfo - * Per-relation partitioning information. - * Allows us to perform partition pruning. + * PartStatusInfo + * Cached partitioning status of the specified relation. + * Allows us to quickly search for PartRelationInfo. 
*/ -typedef struct +typedef struct PartStatusInfo { - Oid key; /* partitioned table's Oid */ - bool valid, /* is this entry valid? */ - enable_parent; /* should plan include parent? */ - - PartType parttype; /* partitioning type (HASH | RANGE) */ - - /* Partition dispatch info */ - uint32 children_count; - Oid *children; /* Oids of child partitions */ - RangeEntry *ranges; /* per-partition range entry or NULL */ - - /* Partitioning expression */ - const char *expr_cstr; /* original expression */ - Node *expr; /* planned expression */ - List *expr_vars; /* vars from expression, lazy */ - Bitmapset *expr_atts; /* attnums from expression */ - - /* Partitioning expression's value */ - Oid ev_type; /* expression type */ - int32 ev_typmod; /* expression type modifier */ - bool ev_byval; /* is expression's val stored by value? */ - int16 ev_len; /* length of the expression val's type */ - int ev_align; /* alignment of the expression val's type */ - Oid ev_collid; /* collation of the expression val */ - - Oid cmp_proc, /* comparison fuction for 'ev_type' */ - hash_proc; /* hash function for 'ev_type' */ - - MemoryContext mcxt; /* memory context holding this struct */ -} PartRelationInfo; - -#define PART_EXPR_VARNO ( 1 ) + Oid relid; /* key */ + struct PartRelationInfo *prel; +} PartStatusInfo; /* * PartParentInfo * Cached parent of the specified partition. - * Allows us to quickly search for PartRelationInfo. + * Allows us to quickly search for parent PartRelationInfo. */ -typedef struct +typedef struct PartParentInfo { - Oid child_rel; /* key */ - Oid parent_rel; + Oid child_relid; /* key */ + Oid parent_relid; } PartParentInfo; /* @@ -185,9 +169,9 @@ typedef struct * Cached bounds of the specified partition. * Allows us to deminish overhead of check constraints. 
*/ -typedef struct +typedef struct PartBoundInfo { - Oid child_rel; /* key */ + Oid child_relid; /* key */ PartType parttype; @@ -200,26 +184,68 @@ typedef struct uint32 part_idx; } PartBoundInfo; +static inline void +FreePartBoundInfo(PartBoundInfo *pbin) +{ + if (pbin->parttype == PT_RANGE) + { + FreeBound(&pbin->range_min, pbin->byval); + FreeBound(&pbin->range_max, pbin->byval); + } +} + /* - * PartParentSearch - * Represents status of a specific cached entry. - * Returned by [for]get_parent_of_partition(). + * PartRelationInfo + * Per-relation partitioning information. + * Allows us to perform partition pruning. */ -typedef enum +typedef struct PartRelationInfo { - PPS_ENTRY_NOT_FOUND = 0, - PPS_ENTRY_PARENT, /* entry was found, but pg_pathman doesn't know it */ - PPS_ENTRY_PART_PARENT, /* entry is parent and is known by pg_pathman */ - PPS_NOT_SURE /* can't determine (not transactional state) */ -} PartParentSearch; + Oid relid; /* key */ + int32 refcount; /* reference counter */ + bool fresh; /* is this entry fresh? */ + + bool enable_parent; /* should plan include parent? */ + PartType parttype; /* partitioning type (HASH | RANGE) */ + /* Partition dispatch info */ + uint32 children_count; + Oid *children; /* Oids of child partitions */ + RangeEntry *ranges; /* per-partition range entry or NULL */ + + /* Partitioning expression */ + const char *expr_cstr; /* original expression */ + Node *expr; /* planned expression */ + List *expr_vars; /* vars from expression, lazy */ + Bitmapset *expr_atts; /* attnums from expression */ + + /* Partitioning expression's value */ + Oid ev_type; /* expression type */ + int32 ev_typmod; /* expression type modifier */ + bool ev_byval; /* is expression's val stored by value? 
*/ + int16 ev_len; /* length of the expression val's type */ + int ev_align; /* alignment of the expression val's type */ + Oid ev_collid; /* collation of the expression val */ + + Oid cmp_proc, /* comparison function for 'ev_type' */ + hash_proc; /* hash function for 'ev_type' */ + +#ifdef USE_RELINFO_LEAK_TRACKER + List *owners; /* saved callers of get_pathman_relation_info() */ + uint64 access_total; /* total amount of accesses to this entry */ +#endif + + MemoryContext mcxt; /* memory context holding this struct */ +} PartRelationInfo; + +#define PART_EXPR_VARNO ( 1 ) /* * PartRelationInfo field access macros & functions. */ -#define PrelParentRelid(prel) ( (prel)->key ) +#define PrelParentRelid(prel) ( (prel)->relid ) #define PrelGetChildrenArray(prel) ( (prel)->children ) @@ -227,13 +253,26 @@ typedef enum #define PrelChildrenCount(prel) ( (prel)->children_count ) -#define PrelIsValid(prel) ( (prel) && (prel)->valid ) +#define PrelReferenceCount(prel) ( (prel)->refcount ) + +#define PrelIsFresh(prel) ( (prel)->fresh ) static inline uint32 -PrelLastChild(const PartRelationInfo *prel) +PrelHasPartition(const PartRelationInfo *prel, Oid partition_relid) { - Assert(PrelIsValid(prel)); + Oid *children = PrelGetChildrenArray(prel); + uint32 i; + for (i = 0; i < PrelChildrenCount(prel); i++) + if (children[i] == partition_relid) + return i + 1; + + return 0; +} + +static inline uint32 +PrelLastChild(const PartRelationInfo *prel) +{ if (PrelChildrenCount(prel) == 0) elog(ERROR, "pg_pathman's cache entry for relation %u has 0 children", PrelParentRelid(prel)); @@ -250,7 +289,7 @@ PrelExpressionColumnNames(const PartRelationInfo *prel) while ((i = bms_next_member(prel->expr_atts, i)) >= 0) { AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; - char *attname = get_attname(PrelParentRelid(prel), attnum); + char *attname = get_attname_compat(PrelParentRelid(prel), attnum); columns = lappend(columns, makeString(attname)); } @@ -259,59 +298,28 @@ 
PrelExpressionColumnNames(const PartRelationInfo *prel) } static inline Node * -PrelExpressionForRelid(const PartRelationInfo *prel, Index rel_index) +PrelExpressionForRelid(const PartRelationInfo *prel, Index rti) { /* TODO: implement some kind of cache */ Node *expr = copyObject(prel->expr); - if (rel_index != PART_EXPR_VARNO) - ChangeVarNodes(expr, PART_EXPR_VARNO, rel_index, 0); + if (rti != PART_EXPR_VARNO) + ChangeVarNodes(expr, PART_EXPR_VARNO, rti, 0); return expr; } +#if PG_VERSION_NUM >= 130000 +AttrMap *PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc); +#else +AttrNumber *PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length); +#endif -const PartRelationInfo *refresh_pathman_relation_info(Oid relid, - Datum *values, - bool allow_incomplete); -PartRelationInfo *invalidate_pathman_relation_info(Oid relid, bool *found); -void remove_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info(Oid relid); -const PartRelationInfo *get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result); - -/* Partitioning expression routines */ -Node *parse_partitioning_expression(const Oid relid, - const char *expr_cstr, - char **query_string_out, - Node **parsetree_out); - -Datum cook_partitioning_expression(const Oid relid, - const char *expr_cstr, - Oid *expr_type); - -char *canonicalize_partitioning_expression(const Oid relid, - const char *expr_cstr); - -/* Global invalidation routines */ -void delay_pathman_shutdown(void); -void delay_invalidation_parent_rel(Oid parent); -void delay_invalidation_vague_rel(Oid vague_rel); -void finish_delayed_invalidation(void); - -/* Parent cache */ -void cache_parent_of_partition(Oid partition, Oid parent); -Oid forget_parent_of_partition(Oid partition, PartParentSearch *status); -Oid get_parent_of_partition(Oid partition, PartParentSearch *status); - -/* Bounds cache */ 
-void forget_bounds_of_partition(Oid partition); -PartBoundInfo *get_bounds_of_partition(Oid partition, - const PartRelationInfo *prel); /* PartType wrappers */ - static inline void WrongPartType(PartType parttype) { @@ -332,16 +340,13 @@ DatumGetPartType(Datum datum) static inline char * PartTypeToCString(PartType parttype) { - static char *hash_str = "1", - *range_str = "2"; - switch (parttype) { case PT_HASH: - return hash_str; + return "1"; case PT_RANGE: - return range_str; + return "2"; default: WrongPartType(parttype); @@ -350,48 +355,77 @@ PartTypeToCString(PartType parttype) } -/* PartRelationInfo checker */ +/* Status chache */ +void forget_status_of_relation(Oid relid); +void invalidate_status_cache(void); + +/* Dispatch cache */ +bool has_pathman_relation_info(Oid relid); +PartRelationInfo *get_pathman_relation_info(Oid relid); +void close_pathman_relation_info(PartRelationInfo *prel); + +void qsort_range_entries(RangeEntry *entries, int nentries, + const PartRelationInfo *prel); + void shout_if_prel_is_invalid(const Oid parent_oid, const PartRelationInfo *prel, const PartType expected_part_type); +/* Bounds cache */ +void forget_bounds_of_rel(Oid partition); +PartBoundInfo *get_bounds_of_partition(Oid partition, const PartRelationInfo *prel); +Expr *get_partition_constraint_expr(Oid partition, bool raise_error); +void invalidate_bounds_cache(void); -/* - * Useful functions & macros for freeing memory. 
- */ +/* Parents cache */ +void cache_parent_of_partition(Oid partition, Oid parent); +void forget_parent_of_partition(Oid partition); +Oid get_parent_of_partition(Oid partition); +void invalidate_parents_cache(void); -/* Remove all references to this parent from parents cache */ -static inline void -ForgetParent(PartRelationInfo *prel) -{ - uint32 i; +/* Partitioning expression routines */ +Node *parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, + Node **parsetree_out); - AssertArg(MemoryContextIsValid(prel->mcxt)); +Node *cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type); - /* Remove relevant PartParentInfos */ - if (prel->children) - { - for (i = 0; i < PrelChildrenCount(prel); i++) - { - Oid child = prel->children[i]; - - /* Skip if Oid is invalid (e.g. initialization error) */ - if (!OidIsValid(child)) - continue; - - /* If it's *always been* relid's partition, free cache */ - if (PrelParentRelid(prel) == get_parent_of_partition(child, NULL)) - forget_parent_of_partition(child, NULL); - } - } -} +char *canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr); + +/* Global invalidation routines */ +void delay_pathman_shutdown(void); +void finish_delayed_invalidation(void); + +void init_relation_info_static_data(void); /* For pg_pathman.enable_bounds_cache GUC */ -extern bool pg_pathman_enable_bounds_cache; +extern bool pg_pathman_enable_bounds_cache; -void init_relation_info_static_data(void); +extern HTAB *prel_resowner; +/* This allows us to track leakers of PartRelationInfo */ +#ifdef USE_RELINFO_LEAK_TRACKER +extern const char *prel_resowner_function; +extern int prel_resowner_line; -#endif /* RELATION_INFO_H */ +#define get_pathman_relation_info(relid) \ + ( \ + prel_resowner_function = __FUNCTION__, \ + prel_resowner_line = __LINE__, \ + get_pathman_relation_info(relid) \ + ) + +#define close_pathman_relation_info(prel) \ + do { \ + 
close_pathman_relation_info(prel); \ + prel = NULL; \ + } while (0) +#endif /* USE_RELINFO_LEAK_TRACKER */ + +#endif /* RELATION_INFO_H */ diff --git a/src/include/runtimeappend.h b/src/include/runtime_append.h similarity index 65% rename from src/include/runtimeappend.h rename to src/include/runtime_append.h index ee25c337..bc76ea70 100644 --- a/src/include/runtimeappend.h +++ b/src/include/runtime_append.h @@ -21,6 +21,9 @@ #include "commands/explain.h" +#define RUNTIME_APPEND_NODE_NAME "RuntimeAppend" + + typedef struct { CustomPath cpath; @@ -41,8 +44,9 @@ typedef struct /* Refined clauses for partition pruning */ List *canon_custom_exprs; - /* Copy of partitioning expression (protect from invalidations) */ + /* Copy of partitioning expression and dispatch info */ Node *prel_expr; + PartRelationInfo *prel; /* All available plans \ plan states */ HTAB *children_table; @@ -70,32 +74,32 @@ extern CustomScanMethods runtimeappend_plan_methods; extern CustomExecMethods runtimeappend_exec_methods; -void init_runtimeappend_static_data(void); +void init_runtime_append_static_data(void); -Path * create_runtimeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel); +Path * create_runtime_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel); -Plan * create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans); +Plan * create_runtime_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans); -Node * runtimeappend_create_scan_state(CustomScan *node); +Node * runtime_append_create_scan_state(CustomScan *node); -void runtimeappend_begin(CustomScanState *node, - EState *estate, - int eflags); +void runtime_append_begin(CustomScanState *node, + EState *estate, + int eflags); -TupleTableSlot * runtimeappend_exec(CustomScanState *node); 
+TupleTableSlot * runtime_append_exec(CustomScanState *node); -void runtimeappend_end(CustomScanState *node); +void runtime_append_end(CustomScanState *node); -void runtimeappend_rescan(CustomScanState *node); +void runtime_append_rescan(CustomScanState *node); -void runtimeappend_explain(CustomScanState *node, - List *ancestors, - ExplainState *es); +void runtime_append_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); #endif /* RUNTIME_APPEND_H */ diff --git a/src/include/runtime_merge_append.h b/src/include/runtime_merge_append.h index 9aa6aed9..8d24bf20 100644 --- a/src/include/runtime_merge_append.h +++ b/src/include/runtime_merge_append.h @@ -14,12 +14,15 @@ #define RUNTIME_MERGE_APPEND_H -#include "runtimeappend.h" +#include "runtime_append.h" #include "pathman.h" #include "postgres.h" +#define RUNTIME_MERGE_APPEND_NODE_NAME "RuntimeMergeAppend" + + typedef struct { RuntimeAppendPath rpath; @@ -54,30 +57,30 @@ extern CustomExecMethods runtime_merge_append_exec_methods; void init_runtime_merge_append_static_data(void); -Path * create_runtimemergeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel); +Path * create_runtime_merge_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel); -Plan * create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans); +Plan * create_runtime_merge_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans); -Node * runtimemergeappend_create_scan_state(CustomScan *node); +Node * runtime_merge_append_create_scan_state(CustomScan *node); -void runtimemergeappend_begin(CustomScanState *node, - EState *estate, - int eflags); +void runtime_merge_append_begin(CustomScanState *node, + EState *estate, + int eflags); -TupleTableSlot * runtimemergeappend_exec(CustomScanState *node); 
+TupleTableSlot * runtime_merge_append_exec(CustomScanState *node); -void runtimemergeappend_end(CustomScanState *node); +void runtime_merge_append_end(CustomScanState *node); -void runtimemergeappend_rescan(CustomScanState *node); +void runtime_merge_append_rescan(CustomScanState *node); -void runtimemergeappend_explain(CustomScanState *node, - List *ancestors, - ExplainState *es); +void runtime_merge_append_explain(CustomScanState *node, + List *ancestors, + ExplainState *es); #endif /* RUNTIME_MERGE_APPEND_H */ diff --git a/src/include/utility_stmt_hooking.h b/src/include/utility_stmt_hooking.h index 6b45cde3..cc22efaf 100644 --- a/src/include/utility_stmt_hooking.h +++ b/src/include/utility_stmt_hooking.h @@ -23,7 +23,8 @@ /* Various traits */ bool is_pathman_related_copy(Node *parsetree); bool is_pathman_related_table_rename(Node *parsetree, - Oid *partition_relid_out); + Oid *relation_oid_out, + bool *is_parent_out); bool is_pathman_related_alter_column_type(Node *parsetree, Oid *parent_relid_out, AttrNumber *attr_number, @@ -32,8 +33,9 @@ bool is_pathman_related_alter_column_type(Node *parsetree, /* Statement handlers */ void PathmanDoCopy(const CopyStmt *stmt, const char *queryString, int stmt_location, int stmt_len, uint64 *processed); -void PathmanRenameConstraint(Oid partition_relid, - const RenameStmt *partition_rename_stmt); + +void PathmanRenameConstraint(Oid partition_relid, const RenameStmt *rename_stmt); +void PathmanRenameSequence(Oid parent_relid, const RenameStmt *rename_stmt); #endif /* COPY_STMT_HOOKING_H */ diff --git a/src/include/utils.h b/src/include/utils.h index 16100df7..566c04db 100644 --- a/src/include/utils.h +++ b/src/include/utils.h @@ -12,10 +12,9 @@ #define PATHMAN_UTILS_H -#include "pathman.h" - #include "postgres.h" #include "parser/parse_oper.h" +#include "fmgr.h" /* @@ -24,19 +23,45 @@ bool clause_contains_params(Node *clause); bool is_date_type_internal(Oid typid); bool check_security_policy_internal(Oid relid, Oid role); 
-bool match_expr_to_operand(Node *expr, Node *operand); +bool match_expr_to_operand(const Node *expr, const Node *operand); /* * Misc. */ -Oid get_pathman_schema(void); -List * list_reverse(List *l); +List *list_reverse(List *l); + +/* + * Dynamic arrays. + */ + +#define ARRAY_EXP 2 + +#define ArrayAlloc(array, alloced, used, size) \ + do { \ + (array) = palloc((size) * sizeof(*(array))); \ + (alloced) = (size); \ + (used) = 0; \ + } while (0) + +#define ArrayPush(array, alloced, used, value) \ + do { \ + if ((alloced) <= (used)) \ + { \ + (alloced) = (alloced) * ARRAY_EXP + 1; \ + (array) = repalloc((array), (alloced) * sizeof(*(array))); \ + } \ + \ + (array)[(used)] = (value); \ + \ + (used)++; \ + } while (0) /* * Useful functions for relations. */ Oid get_rel_owner(Oid relid); -char * get_rel_name_or_relid(Oid relid); +char *get_rel_name_or_relid(Oid relid); +char *get_qualified_rel_name(Oid relid); RangeVar *makeRangeVarFromRelid(Oid relid); /* @@ -52,13 +77,13 @@ void extract_op_func_and_ret_type(char *opname, /* * Print values and cast types. 
*/ -char * datum_to_cstring(Datum datum, Oid typid); +char *datum_to_cstring(Datum datum, Oid typid); Datum perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success); Datum extract_binary_interval_from_text(Datum interval_text, Oid part_atttype, Oid *interval_type); -char ** deconstruct_text_array(Datum array, int *array_size); -RangeVar ** qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); - +char **deconstruct_text_array(Datum array, int *array_size); +RangeVar **qualified_relnames_to_rangevars(char **relnames, size_t nrelnames); +void check_relation_oid(Oid relid); #endif /* PATHMAN_UTILS_H */ diff --git a/src/include/xact_handling.h b/src/include/xact_handling.h index 27939304..fe9f976c 100644 --- a/src/include/xact_handling.h +++ b/src/include/xact_handling.h @@ -28,11 +28,9 @@ LockAcquireResult xact_lock_rel(Oid relid, LOCKMODE lockmode, bool nowait); bool xact_bgw_conflicting_lock_exists(Oid relid); bool xact_is_level_read_committed(void); bool xact_is_transaction_stmt(Node *stmt); -bool xact_is_set_stmt(Node *stmt); +bool xact_is_set_stmt(Node *stmt, const char *name); bool xact_is_alter_pathman_stmt(Node *stmt); bool xact_object_is_visible(TransactionId obj_xmin); -void prevent_data_modification_internal(Oid relid); - #endif /* XACT_HANDLING_H */ diff --git a/src/init.c b/src/init.c index 3219b1fa..1907d9dc 100644 --- a/src/init.c +++ b/src/init.c @@ -3,7 +3,7 @@ * init.c * Initialization functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -21,13 +21,20 @@ #include "utils.h" #include "access/htup_details.h" +#include "access/heapam.h" +#include "access/genam.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "catalog/indexing.h" #include "catalog/pg_extension.h" 
#include "catalog/pg_inherits.h" -#include "catalog/pg_inherits_fn.h" #include "catalog/pg_type.h" #include "miscadmin.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/nodeFuncs.h" +#endif #include "optimizer/clauses.h" #include "utils/inval.h" #include "utils/builtins.h" @@ -38,21 +45,28 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif + +#include + /* Various memory contexts for caches */ MemoryContext TopPathmanContext = NULL; -MemoryContext PathmanRelationCacheContext = NULL; -MemoryContext PathmanParentCacheContext = NULL; -MemoryContext PathmanBoundCacheContext = NULL; +MemoryContext PathmanParentsCacheContext = NULL; +MemoryContext PathmanStatusCacheContext = NULL; +MemoryContext PathmanBoundsCacheContext = NULL; -/* Storage for PartRelationInfos */ -HTAB *partitioned_rels = NULL; /* Storage for PartParentInfos */ -HTAB *parent_cache = NULL; +HTAB *parents_cache = NULL; + +/* Storage for PartStatusInfos */ +HTAB *status_cache = NULL; /* Storage for PartBoundInfos */ -HTAB *bound_cache = NULL; +HTAB *bounds_cache = NULL; /* pg_pathman's init status */ PathmanInitState pathman_init_state; @@ -61,25 +75,12 @@ PathmanInitState pathman_init_state; bool pathman_hooks_enabled = true; -/* Shall we install new relcache callback? 
*/ -static bool relcache_callback_needed = true; - - /* Functions for various local caches */ static bool init_pathman_relation_oids(void); static void fini_pathman_relation_oids(void); static void init_local_cache(void); static void fini_local_cache(void); -/* Special handlers for read_pathman_config() */ -static void add_partrel_to_array(Datum *values, bool *isnull, void *context); -static void startup_invalidate_parent(Datum *values, bool *isnull, void *context); - -static void read_pathman_config(void (*per_row_cb)(Datum *values, - bool *isnull, - void *context), - void *context); - static bool validate_range_opexpr(const Expr *expr, const PartRelationInfo *prel, const TypeCacheEntry *tce, @@ -92,9 +93,10 @@ static bool read_opexpr_const(const OpExpr *opexpr, /* Validate SQL facade */ -static uint32 build_sql_facade_version(char *version_cstr); -static uint32 get_sql_facade_version(void); -static void validate_sql_facade_version(uint32 ver); +static uint32 build_semver_uint32(char *version_cstr); +static uint32 get_plpgsql_frontend_version(void); +static void validate_plpgsql_frontend_version(uint32 current_ver, + uint32 compatible_ver); /* @@ -132,15 +134,22 @@ pathman_cache_search_relid(HTAB *cache_table, */ void -save_pathman_init_state(PathmanInitState *temp_init_state) +save_pathman_init_state(volatile PathmanInitState *temp_init_state) { *temp_init_state = pathman_init_state; } void -restore_pathman_init_state(const PathmanInitState *temp_init_state) +restore_pathman_init_state(const volatile PathmanInitState *temp_init_state) { - pathman_init_state = *temp_init_state; + /* + * initialization_needed is not restored: it is not just a setting but + * internal thing, caches must be inited when it is set. Better would be + * to separate it from this struct entirely. 
+ */ + pathman_init_state.pg_pathman_enable = temp_init_state->pg_pathman_enable; + pathman_init_state.auto_partition = temp_init_state->auto_partition; + pathman_init_state.override_copy = temp_init_state->override_copy; } /* @@ -157,7 +166,7 @@ init_main_pathman_toggles(void) DEFAULT_PATHMAN_ENABLE, PGC_SUSET, 0, - NULL, + pathman_enable_check_hook, pathman_enable_assign_hook, NULL); @@ -193,6 +202,8 @@ init_main_pathman_toggles(void) bool load_config(void) { + static bool relcache_callback_needed = true; + /* * Try to cache important relids. * @@ -206,14 +217,12 @@ load_config(void) return false; /* remain 'uninitialized', exit before creating main caches */ /* Validate pg_pathman's Pl/PgSQL facade (might be outdated) */ - validate_sql_facade_version(get_sql_facade_version()); + validate_plpgsql_frontend_version(get_plpgsql_frontend_version(), + build_semver_uint32(LOWEST_COMPATIBLE_FRONT)); /* Create various hash tables (caches) */ init_local_cache(); - /* Read PATHMAN_CONFIG table & fill cache */ - read_pathman_config(startup_invalidate_parent, NULL); - /* Register pathman_relcache_hook(), currently we can't unregister it */ if (relcache_callback_needed) { @@ -264,7 +273,8 @@ static bool init_pathman_relation_oids(void) { Oid schema = get_pathman_schema(); - Assert(schema != InvalidOid); + if (schema == InvalidOid) + return false; /* extension can be dropped by another backend */ /* Cache PATHMAN_CONFIG relation's Oid */ pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, schema); @@ -304,78 +314,80 @@ init_local_cache(void) HASHCTL ctl; /* Destroy caches, just in case */ - hash_destroy(partitioned_rels); - hash_destroy(parent_cache); - hash_destroy(bound_cache); + hash_destroy(parents_cache); + hash_destroy(status_cache); + hash_destroy(bounds_cache); /* Reset pg_pathman's memory contexts */ if (TopPathmanContext) { /* Check that child contexts exist */ - Assert(MemoryContextIsValid(PathmanRelationCacheContext)); - 
Assert(MemoryContextIsValid(PathmanParentCacheContext)); - Assert(MemoryContextIsValid(PathmanBoundCacheContext)); + Assert(MemoryContextIsValid(PathmanParentsCacheContext)); + Assert(MemoryContextIsValid(PathmanStatusCacheContext)); + Assert(MemoryContextIsValid(PathmanBoundsCacheContext)); /* Clear children */ - MemoryContextResetChildren(TopPathmanContext); + MemoryContextReset(PathmanParentsCacheContext); + MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); } /* Initialize pg_pathman's memory contexts */ else { - Assert(PathmanRelationCacheContext == NULL); - Assert(PathmanParentCacheContext == NULL); - Assert(PathmanBoundCacheContext == NULL); + Assert(PathmanParentsCacheContext == NULL); + Assert(PathmanStatusCacheContext == NULL); + Assert(PathmanBoundsCacheContext == NULL); TopPathmanContext = AllocSetContextCreate(TopMemoryContext, - CppAsString(TopPathmanContext), + PATHMAN_TOP_CONTEXT, ALLOCSET_DEFAULT_SIZES); - /* For PartRelationInfo */ - PathmanRelationCacheContext = + /* For PartParentInfo */ + PathmanParentsCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanRelationCacheContext), - ALLOCSET_DEFAULT_SIZES); + PATHMAN_PARENTS_CACHE, + ALLOCSET_SMALL_SIZES); - /* For PartParentInfo */ - PathmanParentCacheContext = + /* For PartStatusInfo */ + PathmanStatusCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanParentCacheContext), - ALLOCSET_DEFAULT_SIZES); + PATHMAN_STATUS_CACHE, + ALLOCSET_SMALL_SIZES); /* For PartBoundInfo */ - PathmanBoundCacheContext = + PathmanBoundsCacheContext = AllocSetContextCreate(TopPathmanContext, - CppAsString(PathmanBoundCacheContext), - ALLOCSET_DEFAULT_SIZES); + PATHMAN_BOUNDS_CACHE, + ALLOCSET_SMALL_SIZES); } memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartRelationInfo); - ctl.hcxt = PathmanRelationCacheContext; + ctl.entrysize = sizeof(PartParentInfo); + ctl.hcxt = 
PathmanParentsCacheContext; - partitioned_rels = hash_create("pg_pathman's partition dispatch cache", - PART_RELS_SIZE, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + parents_cache = hash_create(PATHMAN_PARENTS_CACHE, + PART_RELS_SIZE, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); - ctl.entrysize = sizeof(PartParentInfo); - ctl.hcxt = PathmanParentCacheContext; + ctl.entrysize = sizeof(PartStatusInfo); + ctl.hcxt = PathmanStatusCacheContext; - parent_cache = hash_create("pg_pathman's partition parents cache", + status_cache = hash_create(PATHMAN_STATUS_CACHE, PART_RELS_SIZE * CHILD_FACTOR, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(Oid); ctl.entrysize = sizeof(PartBoundInfo); - ctl.hcxt = PathmanBoundCacheContext; + ctl.hcxt = PathmanBoundsCacheContext; - bound_cache = hash_create("pg_pathman's partition bounds cache", - PART_RELS_SIZE * CHILD_FACTOR, &ctl, - HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + bounds_cache = hash_create(PATHMAN_BOUNDS_CACHE, + PART_RELS_SIZE * CHILD_FACTOR, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); } /* @@ -385,16 +397,27 @@ static void fini_local_cache(void) { /* First, destroy hash tables */ - hash_destroy(partitioned_rels); - hash_destroy(parent_cache); - hash_destroy(bound_cache); + hash_destroy(parents_cache); + hash_destroy(status_cache); + hash_destroy(bounds_cache); - partitioned_rels = NULL; - parent_cache = NULL; - bound_cache = NULL; + parents_cache = NULL; + status_cache = NULL; + bounds_cache = NULL; + + if (prel_resowner != NULL) + { + hash_destroy(prel_resowner); + prel_resowner = NULL; + } /* Now we can clear allocations */ - MemoryContextResetChildren(TopPathmanContext); + if (TopPathmanContext) + { + MemoryContextReset(PathmanParentsCacheContext); + MemoryContextReset(PathmanStatusCacheContext); + MemoryContextReset(PathmanBoundsCacheContext); + } } @@ -402,7 +425,7 @@ fini_local_cache(void) * 
find_inheritance_children * * Returns an array containing the OIDs of all relations which - * inherit *directly* from the relation with OID 'parentrelId'. + * inherit *directly* from the relation with OID 'parent_relid'. * * The specified lock type is acquired on each child relation (but not on the * given rel; caller should already have locked it). If lockmode is NoLock @@ -412,7 +435,7 @@ fini_local_cache(void) * borrowed from pg_inherits.c */ find_children_status -find_inheritance_children_array(Oid parentrelId, +find_inheritance_children_array(Oid parent_relid, LOCKMODE lockmode, bool nowait, uint32 *children_size, /* ret value #1 */ @@ -440,22 +463,20 @@ find_inheritance_children_array(Oid parentrelId, * Can skip the scan if pg_class shows the * relation has never had a subclass. */ - if (!has_subclass(parentrelId)) + if (!has_subclass(parent_relid)) return FCS_NO_CHILDREN; /* * Scan pg_inherits and build a working array of subclass OIDs. */ - maxoids = 32; - oidarr = (Oid *) palloc(maxoids * sizeof(Oid)); - numoids = 0; + ArrayAlloc(oidarr, maxoids, numoids, 32); - relation = heap_open(InheritsRelationId, AccessShareLock); + relation = heap_open_compat(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhparent, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(parentrelId)); + ObjectIdGetDatum(parent_relid)); scan = systable_beginscan(relation, InheritsParentIndexId, true, NULL, 1, key); @@ -465,17 +486,12 @@ find_inheritance_children_array(Oid parentrelId, Oid inhrelid; inhrelid = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhrelid; - if (numoids >= maxoids) - { - maxoids *= 2; - oidarr = (Oid *) repalloc(oidarr, maxoids * sizeof(Oid)); - } - oidarr[numoids++] = inhrelid; + ArrayPush(oidarr, maxoids, numoids, inhrelid); } systable_endscan(scan); - heap_close(relation, AccessShareLock); + heap_close_compat(relation, AccessShareLock); /* * If we found more than one child, sort them by OID. 
This ensures @@ -554,7 +570,7 @@ find_inheritance_children_array(Oid parentrelId, char * build_check_constraint_name_relid_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return build_check_constraint_name_relname_internal(get_rel_name(relid)); } @@ -565,6 +581,7 @@ build_check_constraint_name_relid_internal(Oid relid) char * build_check_constraint_name_relname_internal(const char *relname) { + Assert(relname != NULL); return psprintf("pathman_%s_check", relname); } @@ -573,10 +590,21 @@ build_check_constraint_name_relname_internal(const char *relname) * NOTE: this function does not perform sanity checks at all. */ char * -build_sequence_name_internal(Oid relid) +build_sequence_name_relid_internal(Oid relid) +{ + Assert(OidIsValid(relid)); + return build_sequence_name_relname_internal(get_rel_name(relid)); +} + +/* + * Generate part sequence name for a parent. + * NOTE: this function does not perform sanity checks at all. + */ +char * +build_sequence_name_relname_internal(const char *relname) { - AssertArg(OidIsValid(relid)); - return psprintf("%s_seq", get_rel_name(relid)); + Assert(relname != NULL); + return psprintf("%s_seq", relname); } /* @@ -586,7 +614,7 @@ build_sequence_name_internal(Oid relid) char * build_update_trigger_name_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return psprintf("%s_upd_trig", get_rel_name(relid)); } @@ -597,7 +625,7 @@ build_update_trigger_name_internal(Oid relid) char * build_update_trigger_func_name_internal(Oid relid) { - AssertArg(OidIsValid(relid)); + Assert(OidIsValid(relid)); return psprintf("%s_upd_trig_func", get_rel_name(relid)); } @@ -612,7 +640,11 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, TransactionId *xmin, ItemPointerData* iptr) { Relation rel; +#if PG_VERSION_NUM >= 120000 + TableScanDesc scan; +#else HeapScanDesc scan; +#endif ScanKeyData key[1]; Snapshot snapshot; HeapTuple htup; @@ -624,18 +656,21 @@ 
pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, ObjectIdGetDatum(relid)); /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(false), AccessShareLock); + rel = heap_open_compat(get_pathman_config_relid(false), AccessShareLock); /* Check that 'partrel' column is of regclass type */ - Assert(RelationGetDescr(rel)-> - attrs[Anum_pathman_config_partrel - 1]-> - atttypid == REGCLASSOID); + Assert(TupleDescAttr(RelationGetDescr(rel), + Anum_pathman_config_partrel - 1)->atttypid == REGCLASSOID); /* Check that number of columns == Natts_pathman_config */ Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + scan = table_beginscan(rel, snapshot, 1, key); +#else scan = heap_beginscan(rel, snapshot, 1, key); +#endif while ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { @@ -644,6 +679,7 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Extract data if necessary */ if (values && isnull) { + htup = heap_copytuple(htup); heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); /* Perform checks for non-NULL columns */ @@ -654,94 +690,28 @@ pathman_config_contains_relation(Oid relid, Datum *values, bool *isnull, /* Set xmin if necessary */ if (xmin) - { - Datum value; - bool isnull; - - value = heap_getsysattr(htup, - MinTransactionIdAttributeNumber, - RelationGetDescr(rel), - &isnull); - - Assert(!isnull); - *xmin = DatumGetTransactionId(value); - } + *xmin = HeapTupleGetXminCompat(htup); /* Set ItemPointer if necessary */ if (iptr) - *iptr = htup->t_self; + *iptr = htup->t_self; /* FIXME: callers should lock table beforehand */ } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(scan); +#else heap_endscan(scan); +#endif UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); - elog(DEBUG2, 
"PATHMAN_CONFIG table %s relation %u", + elog(DEBUG2, "PATHMAN_CONFIG %s relation %u", (contains_rel ? "contains" : "doesn't contain"), relid); return contains_rel; } -/* Invalidate parsed partitioning expression in PATHMAN_CONFIG */ -void -pathman_config_invalidate_parsed_expression(Oid relid) -{ - ItemPointerData iptr; /* pointer to tuple */ - Datum values[Natts_pathman_config]; - bool nulls[Natts_pathman_config]; - - /* Check that PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, nulls, NULL, &iptr)) - { - Relation rel; - HeapTuple new_htup; - - /* Reset parsed expression */ - values[Anum_pathman_config_cooked_expr - 1] = (Datum) 0; - nulls[Anum_pathman_config_cooked_expr - 1] = true; - - rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); - - /* Form new tuple and perform an update */ - new_htup = heap_form_tuple(RelationGetDescr(rel), values, nulls); - CatalogTupleUpdate(rel, &iptr, new_htup); - - heap_close(rel, RowExclusiveLock); - } -} - -/* Refresh parsed partitioning expression in PATHMAN_CONFIG */ -void -pathman_config_refresh_parsed_expression(Oid relid, - Datum *values, - bool *isnull, - ItemPointer iptr) -{ - char *expr_cstr; - Datum expr_datum; - - Relation rel; - HeapTuple htup_new; - - /* get and parse expression */ - expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); - expr_datum = cook_partitioning_expression(relid, expr_cstr, NULL); - pfree(expr_cstr); - - /* prepare tuple values */ - values[Anum_pathman_config_cooked_expr - 1] = expr_datum; - isnull[Anum_pathman_config_cooked_expr - 1] = false; - - rel = heap_open(get_pathman_config_relid(false), RowExclusiveLock); - - htup_new = heap_form_tuple(RelationGetDescr(rel), values, isnull); - CatalogTupleUpdate(rel, iptr, htup_new); - - heap_close(rel, RowExclusiveLock); -} - - /* * Loads additional pathman parameters like 'enable_parent' * or 'auto' from PATHMAN_CONFIG_PARAMS. 
@@ -750,7 +720,11 @@ bool read_pathman_params(Oid relid, Datum *values, bool *isnull) { Relation rel; +#if PG_VERSION_NUM >= 120000 + TableScanDesc scan; +#else HeapScanDesc scan; +#endif ScanKeyData key[1]; Snapshot snapshot; HeapTuple htup; @@ -761,14 +735,19 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relid)); - rel = heap_open(get_pathman_config_params_relid(false), AccessShareLock); + rel = heap_open_compat(get_pathman_config_params_relid(false), AccessShareLock); snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + scan = table_beginscan(rel, snapshot, 1, key); +#else scan = heap_beginscan(rel, snapshot, 1, key); +#endif /* There should be just 1 row */ if ((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) { /* Extract data if necessary */ + htup = heap_copytuple(htup); heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); row_found = true; @@ -780,136 +759,18 @@ read_pathman_params(Oid relid, Datum *values, bool *isnull) } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(scan); +#else heap_endscan(scan); +#endif UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); + heap_close_compat(rel, AccessShareLock); return row_found; } -typedef struct -{ - Oid *array; - int nelems; - int capacity; -} read_parent_oids_cxt; - -/* - * Get a sorted array of partitioned tables' Oids. 
- */ -Oid * -read_parent_oids(int *nelems) -{ - read_parent_oids_cxt context = { NULL, 0, 0 }; - - read_pathman_config(add_partrel_to_array, &context); - - /* Perform sorting */ - qsort(context.array, context.nelems, sizeof(Oid), oid_cmp); - - /* Return values */ - *nelems = context.nelems; - return context.array; -} - - -/* read_pathman_config(): add parent to array of Oids */ -static void -add_partrel_to_array(Datum *values, bool *isnull, void *context) -{ - Oid relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - read_parent_oids_cxt *result = (read_parent_oids_cxt *) context; - - if (result->array == NULL) - { - result->capacity = PART_RELS_SIZE; - result->array = palloc(result->capacity * sizeof(Oid)); - } - - if (result->nelems >= result->capacity) - { - result->capacity = result->capacity * 2 + 1; - result->array = repalloc(result->array, result->capacity * sizeof(Oid)); - } - - /* Append current relid */ - result->array[result->nelems++] = relid; -} - -/* read_pathman_config(): create dummy cache entry for parent */ -static void -startup_invalidate_parent(Datum *values, bool *isnull, void *context) -{ - Oid relid = DatumGetObjectId(values[Anum_pathman_config_partrel - 1]); - - /* Check that relation 'relid' exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("table \"%s\" contains nonexistent relation %u", - PATHMAN_CONFIG, relid), - errhint(INIT_ERROR_HINT))); - } - - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(relid, NULL); -} - -/* - * Go through the PATHMAN_CONFIG table and create PartRelationInfo entries. 
- */ -static void -read_pathman_config(void (*per_row_cb)(Datum *values, - bool *isnull, - void *context), - void *context) -{ - Relation rel; - HeapScanDesc scan; - Snapshot snapshot; - HeapTuple htup; - - /* Open PATHMAN_CONFIG with latest snapshot available */ - rel = heap_open(get_pathman_config_relid(false), AccessShareLock); - - /* Check that 'partrel' column is if regclass type */ - Assert(RelationGetDescr(rel)-> - attrs[Anum_pathman_config_partrel - 1]-> - atttypid == REGCLASSOID); - - /* Check that number of columns == Natts_pathman_config */ - Assert(RelationGetDescr(rel)->natts == Natts_pathman_config); - - snapshot = RegisterSnapshot(GetLatestSnapshot()); - scan = heap_beginscan(rel, snapshot, 0, NULL); - - /* Examine each row and create a PartRelationInfo in local cache */ - while((htup = heap_getnext(scan, ForwardScanDirection)) != NULL) - { - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - /* Extract Datums from tuple 'htup' */ - heap_deform_tuple(htup, RelationGetDescr(rel), values, isnull); - - /* These attributes are marked as NOT NULL, check anyway */ - Assert(!isnull[Anum_pathman_config_partrel - 1]); - Assert(!isnull[Anum_pathman_config_parttype - 1]); - Assert(!isnull[Anum_pathman_config_expr - 1]); - - /* Execute per row callback */ - per_row_cb(values, isnull, context); - } - - /* Clean resources */ - heap_endscan(scan); - UnregisterSnapshot(snapshot); - heap_close(rel, AccessShareLock); -} - - /* * Validates range constraint. It MUST have one of the following formats: * 1) EXPRESSION >= CONST AND EXPRESSION < CONST @@ -936,7 +797,7 @@ validate_range_constraint(const Expr *expr, tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); /* Is it an AND clause? 
*/ - if (and_clause((Node *) expr)) + if (is_andclause_compat((Node *) expr)) { const BoolExpr *boolexpr = (const BoolExpr *) expr; ListCell *lc; @@ -1070,7 +931,7 @@ read_opexpr_const(const OpExpr *opexpr, /* Update RIGHT */ right = (Node *) constant; } - /* FALL THROUGH (no break) */ + /* FALLTHROUGH */ case T_Const: { @@ -1145,7 +1006,6 @@ validate_hash_constraint(const Expr *expr, Node *first = linitial(get_hash_expr->args); /* arg #1: TYPE_HASH_PROC(EXPRESSION) */ Node *second = lsecond(get_hash_expr->args); /* arg #2: PARTITIONS_COUNT */ Const *cur_partition_idx; /* hash value for this partition */ - Node *hash_arg; if (!IsA(first, FuncExpr) || !IsA(second, Const)) return false; @@ -1160,13 +1020,6 @@ validate_hash_constraint(const Expr *expr, if (list_length(type_hash_proc_expr->args) != 1) return false; - /* Extract arg of TYPE_HASH_PROC() */ - hash_arg = (Node *) linitial(type_hash_proc_expr->args); - - /* Check arg of TYPE_HASH_PROC() */ - if (!match_expr_to_operand(prel->expr, hash_arg)) - return false; - /* Check that PARTITIONS_COUNT is equal to total amount of partitions */ if (DatumGetUInt32(((Const *) second)->constvalue) != PrelChildrenCount(prel)) return false; @@ -1195,27 +1048,66 @@ validate_hash_constraint(const Expr *expr, /* Parse cstring and build uint32 representing the version */ static uint32 -build_sql_facade_version(char *version_cstr) +build_semver_uint32(char *version_cstr) { - uint32 version; + uint32 version = 0; + bool expect_num_token = false; + long max_dots = 2; + char *pos = version_cstr; + + while (*pos) + { + /* Invert expected token type */ + expect_num_token = !expect_num_token; + + if (expect_num_token) + { + char *end_pos; + long num; + long i; + + /* Parse number */ + num = strtol(pos, &end_pos, 10); - /* expect to see x+.y+.z+ */ - version = strtol(version_cstr, &version_cstr, 10) & 0xFF; + if (pos == end_pos || num > 99 || num < 0) + goto version_error; - version <<= 8; - if (strlen(version_cstr) > 1) - version |= 
(strtol(version_cstr + 1, &version_cstr, 10) & 0xFF); + for (i = 0; i < max_dots; i++) + num *= 100; - version <<= 8; - if (strlen(version_cstr) > 1) - version |= (strtol(version_cstr + 1, &version_cstr, 10) & 0xFF); + version += num; + + /* Move position */ + pos = end_pos; + } + else + { + /* Expect to see less dots */ + max_dots--; + + if (*pos != '.' || max_dots < 0) + goto version_error; + + /* Move position */ + pos++; + } + } + + if (!expect_num_token) + goto version_error; return version; + +version_error: + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, (errmsg("wrong version: \"%s\"", version_cstr), + errhint(INIT_ERROR_HINT))); + return 0; /* keep compiler happy */ } /* Get version of pg_pathman's facade written in Pl/PgSQL */ static uint32 -get_sql_facade_version(void) +get_plpgsql_frontend_version(void) { Relation pg_extension_rel; ScanKeyData skey; @@ -1227,7 +1119,7 @@ get_sql_facade_version(void) char *version_cstr; /* Look up the extension */ - pg_extension_rel = heap_open(ExtensionRelationId, AccessShareLock); + pg_extension_rel = heap_open_compat(ExtensionRelationId, AccessShareLock); ScanKeyInit(&skey, Anum_pg_extension_extname, @@ -1252,22 +1144,23 @@ get_sql_facade_version(void) version_cstr = text_to_cstring(DatumGetTextPP(datum)); systable_endscan(scan); - heap_close(pg_extension_rel, AccessShareLock); + heap_close_compat(pg_extension_rel, AccessShareLock); - return build_sql_facade_version(version_cstr); + return build_semver_uint32(version_cstr); } /* Check that current Pl/PgSQL facade is compatible with internals */ static void -validate_sql_facade_version(uint32 ver) +validate_plpgsql_frontend_version(uint32 current_ver, uint32 compatible_ver) { - Assert(ver > 0); + Assert(current_ver > 0); + Assert(compatible_ver > 0); /* Compare ver to 'lowest compatible frontend' version */ - if (ver < LOWEST_COMPATIBLE_FRONT) + if (current_ver < compatible_ver) { elog(DEBUG1, "current version: %x, lowest 
compatible: %x", - ver, LOWEST_COMPATIBLE_FRONT); + current_ver, compatible_ver); DisablePathman(); /* disable pg_pathman since config is broken */ ereport(ERROR, diff --git a/src/nodes_common.c b/src/nodes_common.c index 7688bb07..f4ebc6b1 100644 --- a/src/nodes_common.c +++ b/src/nodes_common.c @@ -3,7 +3,7 @@ * nodes_common.c * Common code for custom nodes * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -11,13 +11,17 @@ #include "init.h" #include "nodes_common.h" -#include "runtimeappend.h" +#include "runtime_append.h" #include "utils.h" #include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/clauses.h" -#include "optimizer/tlist.h" #include "optimizer/var.h" +#endif +#include "optimizer/tlist.h" #include "rewrite/rewriteManip.h" #include "utils/memutils.h" #include "utils/ruleutils.h" @@ -25,7 +29,6 @@ /* Allocation settings */ #define INITIAL_ALLOC_NUM 10 -#define ALLOC_EXP 2 /* Compare plans by 'original_order' */ @@ -56,7 +59,7 @@ transform_plans_into_states(RuntimeAppendState *scan_state, ChildScanCommon child; PlanState *ps; - AssertArg(selected_plans); + Assert(selected_plans); child = selected_plans[i]; /* Create new node since this plan hasn't been used yet */ @@ -92,12 +95,12 @@ transform_plans_into_states(RuntimeAppendState *scan_state, static ChildScanCommon * select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) { - uint32 allocated = INITIAL_ALLOC_NUM, - used = 0; + uint32 allocated, + used; ChildScanCommon *result; int i; - result = (ChildScanCommon *) palloc(allocated * sizeof(ChildScanCommon)); + ArrayAlloc(result, allocated, used, INITIAL_ALLOC_NUM); for (i = 0; i < nparts; i++) { @@ -107,13 +110,7 @@ select_required_plans(HTAB *children_table, Oid *parts, int nparts, int *nres) if (!child) continue; /* no plan for this 
partition */ - if (allocated <= used) - { - allocated = allocated * ALLOC_EXP + 1; - result = repalloc(result, allocated * sizeof(ChildScanCommon)); - } - - result[used++] = child; + ArrayPush(result, allocated, used, child); } /* Get rid of useless array */ @@ -141,18 +138,42 @@ build_parent_tlist(List *tlist, AppendRelInfo *appinfo) foreach (lc1, pulled_vars) { - Var *tlist_var = (Var *) lfirst(lc1); + Var *tlist_var = (Var *) lfirst(lc1); + bool found_column = false; + AttrNumber attnum; + + /* Skip system attributes */ + if (tlist_var->varattno < InvalidAttrNumber) + continue; - AttrNumber attnum = 0; + attnum = 0; foreach (lc2, appinfo->translated_vars) { Var *translated_var = (Var *) lfirst(lc2); + /* Don't forget to inc 'attunum'! */ attnum++; + /* Skip dropped columns */ + if (!translated_var) + continue; + + /* Find this column in list of parent table columns */ if (translated_var->varattno == tlist_var->varattno) + { tlist_var->varattno = attnum; + found_column = true; /* successful mapping */ + break; + } } + + /* Raise ERROR if mapping failed */ + if (!found_column) + elog(ERROR, + "table \"%s\" has no attribute %d of partition \"%s\"", + get_rel_name_or_relid(appinfo->parent_relid), + tlist_var->varattno, + get_rel_name_or_relid(appinfo->child_relid)); } ChangeVarNodes((Node *) temp_tlist, @@ -163,6 +184,42 @@ build_parent_tlist(List *tlist, AppendRelInfo *appinfo) return temp_tlist; } +#if PG_VERSION_NUM >= 140000 +/* + * Function "tlist_member_ignore_relabel" was removed in vanilla (375398244168) + * Function moved to pg_pathman. + */ +/* + * tlist_member_ignore_relabel + * Finds the (first) member of the given tlist whose expression is + * equal() to the given expression. Result is NULL if no such member. + * We ignore top-level RelabelType nodes + * while checking for a match. This is needed for some scenarios + * involving binary-compatible sort operations. 
+ */ +TargetEntry * +tlist_member_ignore_relabel(Expr *node, List *targetlist) +{ + ListCell *temp; + + while (node && IsA(node, RelabelType)) + node = ((RelabelType *) node)->arg; + + foreach(temp, targetlist) + { + TargetEntry *tlentry = (TargetEntry *) lfirst(temp); + Expr *tlexpr = tlentry->expr; + + while (tlexpr && IsA(tlexpr, RelabelType)) + tlexpr = ((RelabelType *) tlexpr)->arg; + + if (equal(node, tlexpr)) + return tlentry; + } + return NULL; +} +#endif + /* Is tlist 'a' subset of tlist 'b'? (in terms of Vars) */ static bool tlist_is_var_subset(List *a, List *b) @@ -209,7 +266,7 @@ append_part_attr_to_tlist(List *tlist, TargetEntry *te = (TargetEntry *) lfirst(lc); Var *var = (Var *) te->expr; - if (IsA(var, Var) && var->varoattno == child_var->varattno) + if (IsA(var, Var) && var->varattno == child_var->varattno) { part_attr_found = true; break; @@ -343,16 +400,24 @@ canonicalize_custom_exprs_mutator(Node *node, void *cxt) Var *var = palloc(sizeof(Var)); *var = *(Var *) node; +#if PG_VERSION_NUM >= 130000 +/* + * In >=13 (9ce77d75c5) varnoold and varoattno were changed to varnosyn and + * varattnosyn, and they are not consulted in _equalVar anymore. 
+ */ + var->varattno = var->varattnosyn; +#else /* Replace original 'varnoold' */ var->varnoold = INDEX_VAR; /* Restore original 'varattno' */ var->varattno = var->varoattno; +#endif return (Node *) var; } - return expression_tree_mutator(node, canonicalize_custom_exprs_mutator, NULL); + return expression_tree_mutator_compat(node, canonicalize_custom_exprs_mutator, NULL); } static List * @@ -395,11 +460,13 @@ get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, bool include_parent) { ListCell *range_cell; - uint32 allocated = INITIAL_ALLOC_NUM, - used = 0; - Oid *result = (Oid *) palloc(allocated * sizeof(Oid)); + uint32 allocated, + used; + Oid *result; Oid *children = PrelGetChildrenArray(prel); + ArrayAlloc(result, allocated, used, INITIAL_ALLOC_NUM); + /* If required, add parent to result */ Assert(INITIAL_ALLOC_NUM >= 1); if (include_parent) @@ -414,14 +481,8 @@ get_partition_oids(List *ranges, int *n, const PartRelationInfo *prel, for (i = a; i <= b; i++) { - if (allocated <= used) - { - allocated = allocated * ALLOC_EXP + 1; - result = repalloc(result, allocated * sizeof(Oid)); - } - Assert(i < PrelChildrenCount(prel)); - result[used++] = children[i]; + ArrayPush(result, allocated, used, children[i]); } } @@ -535,12 +596,15 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, List *clauses, List *custom_plans, CustomScanMethods *scan_methods) { - RuntimeAppendPath *rpath = (RuntimeAppendPath *) best_path; - const PartRelationInfo *prel; - CustomScan *cscan; + RuntimeAppendPath *rpath = (RuntimeAppendPath *) best_path; + PartRelationInfo *prel; + CustomScan *cscan; prel = get_pathman_relation_info(rpath->relid); - Assert(prel); + if (!prel) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(rpath->relid)))); cscan = makeNode(CustomScan); cscan->custom_scan_tlist = NIL; /* initial value (empty list) */ @@ -556,8 +620,10 @@ 
create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, forboth (lc1, rpath->cpath.custom_paths, lc2, custom_plans) { Plan *child_plan = (Plan *) lfirst(lc2); - RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; - AppendRelInfo *appinfo = find_childrel_appendrelinfo(root, child_rel); + RelOptInfo *child_rel = ((Path *) lfirst(lc1))->parent; + AppendRelInfo *appinfo; + + appinfo = find_childrel_appendrelinfo_compat(root, child_rel); /* Replace rel's tlist with a matching one (for ExecQual()) */ if (!processed_rel_tlist) @@ -607,6 +673,9 @@ create_append_plan_common(PlannerInfo *root, RelOptInfo *rel, /* Cache 'prel->enable_parent' as well */ pack_runtimeappend_private(cscan, rpath, prel->enable_parent); + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + return &cscan->scan.plan; } @@ -636,17 +705,25 @@ create_append_scan_state_common(CustomScan *node, void begin_append_common(CustomScanState *node, EState *estate, int eflags) { - RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - const PartRelationInfo *prel; + RuntimeAppendState *scan_state = (RuntimeAppendState *) node; #if PG_VERSION_NUM < 100000 node->ss.ps.ps_TupFromTlist = false; #endif - prel = get_pathman_relation_info(scan_state->relid); + scan_state->prel = get_pathman_relation_info(scan_state->relid); + /* + * scan_state->prel can be NULL in case execution of prepared query that + * was prepared before DROP/CREATE EXTENSION pg_pathman or after + * pathman_config table truncation etc. 
+ */ + if (!scan_state->prel) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(scan_state->relid)))); /* Prepare expression according to set_set_customscan_references() */ - scan_state->prel_expr = PrelExpressionForRelid(prel, INDEX_VAR); + scan_state->prel_expr = PrelExpressionForRelid(scan_state->prel, INDEX_VAR); /* Prepare custom expression according to set_set_customscan_references() */ scan_state->canon_custom_exprs = @@ -671,11 +748,25 @@ exec_append_common(CustomScanState *node, return NULL; if (!node->ss.ps.ps_ProjInfo) + { + /* + * ExecInitCustomScan carelessly promises that it will always (resultopsfixed) + * return TTSOpsVirtual slot. To keep the promise, convert raw + * BufferHeapTupleSlot to virtual even if we don't have any projection. + * + * BTW, why original code decided to invent its own scan_state->slot + * instead of using ss.ss_ScanTupleSlot? + */ +#if PG_VERSION_NUM >= 120000 + return ExecCopySlot(node->ss.ps.ps_ResultTupleSlot, scan_state->slot); +#else return scan_state->slot; +#endif + } /* * Assuming that current projection doesn't involve SRF. - * NOTE: Any SFR functions are evaluated in ProjectSet node. + * NOTE: Any SFR functions since 69f4b9c are evaluated in ProjectSet node. 
*/ ResetExprContext(node->ss.ps.ps_ExprContext); node->ss.ps.ps_ProjInfo->pi_exprContext->ecxt_scantuple = scan_state->slot; @@ -726,22 +817,20 @@ end_append_common(CustomScanState *node) clear_plan_states(&scan_state->css); hash_destroy(scan_state->children_table); + close_pathman_relation_info(scan_state->prel); } void rescan_append_common(CustomScanState *node) { - RuntimeAppendState *scan_state = (RuntimeAppendState *) node; - ExprContext *econtext = node->ss.ps.ps_ExprContext; - const PartRelationInfo *prel; - List *ranges; - ListCell *lc; - WalkerContext wcxt; - Oid *parts; - int nparts; - - prel = get_pathman_relation_info(scan_state->relid); - Assert(prel); + RuntimeAppendState *scan_state = (RuntimeAppendState *) node; + ExprContext *econtext = node->ss.ps.ps_ExprContext; + PartRelationInfo *prel = scan_state->prel; + List *ranges; + ListCell *lc; + WalkerContext wcxt; + Oid *parts; + int nparts; /* First we select all available partitions... */ ranges = list_make1_irange_full(prel, IR_COMPLETE); @@ -788,9 +877,18 @@ explain_append_common(CustomScanState *node, char *exprstr; /* Set up deparsing context */ +#if PG_VERSION_NUM >= 130000 +/* + * Since 6ef77cf46e8 + */ + deparse_context = set_deparse_context_plan(es->deparse_cxt, + node->ss.ps.plan, + ancestors); +#else deparse_context = set_deparse_context_planstate(es->deparse_cxt, (Node *) node, ancestors); +#endif /* Deparse the expression */ exprstr = deparse_expression((Node *) make_ands_explicit(custom_exprs), @@ -802,14 +900,14 @@ explain_append_common(CustomScanState *node, /* Construct excess PlanStates */ if (!es->analyze) { - uint32 allocated = INITIAL_ALLOC_NUM, - used = 0; + uint32 allocated, + used; ChildScanCommon *custom_ps, child; HASH_SEQ_STATUS seqstat; int i; - custom_ps = (ChildScanCommon *) palloc(allocated * sizeof(ChildScanCommon)); + ArrayAlloc(custom_ps, allocated, used, INITIAL_ALLOC_NUM); /* There can't be any nodes since we're not scanning anything */ Assert(!node->custom_ps); @@ 
-819,13 +917,7 @@ explain_append_common(CustomScanState *node, while ((child = (ChildScanCommon) hash_seq_search(&seqstat))) { - if (allocated <= used) - { - allocated = allocated * ALLOC_EXP + 1; - custom_ps = repalloc(custom_ps, allocated * sizeof(ChildScanCommon)); - } - - custom_ps[used++] = child; + ArrayPush(custom_ps, allocated, used, child); } /* diff --git a/src/partition_creation.c b/src/partition_creation.c index 62f3a6b7..d6080c85 100644 --- a/src/partition_creation.c +++ b/src/partition_creation.c @@ -3,7 +3,7 @@ * partition_creation.c * Various functions for partition creation. * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * *------------------------------------------------------------------------- */ @@ -19,6 +19,9 @@ #include "access/htup_details.h" #include "access/reloptions.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/xact.h" #include "catalog/heap.h" #include "catalog/pg_authid.h" @@ -26,26 +29,35 @@ #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "catalog/toasting.h" +#include "commands/defrem.h" #include "commands/event_trigger.h" #include "commands/sequence.h" #include "commands/tablecmds.h" #include "commands/tablespace.h" #include "commands/trigger.h" +#include "executor/spi.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parse_func.h" #include "parser/parse_utilcmd.h" #include "parser/parse_relation.h" #include "tcop/utility.h" +#if PG_VERSION_NUM >= 130000 +#include "utils/acl.h" +#endif #include "utils/builtins.h" #include "utils/datum.h" #include "utils/fmgroids.h" +#include "utils/inval.h" #include "utils/jsonb.h" #include "utils/snapmgr.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM >= 100000 +#include "utils/regproc.h" +#endif static Oid spawn_partitions_val(Oid parent_relid, const Bound *range_bound_min, @@ 
-74,13 +86,19 @@ static ObjectAddress create_table_using_stmt(CreateStmt *create_stmt, Oid relowner); static void copy_foreign_keys(Oid parent_relid, Oid partition_oid); +static void copy_rel_options(Oid parent_relid, Oid partition_relid); static void postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid); static Oid text_to_regprocedure(text *proname_args); static Constraint *make_constraint_common(char *name, Node *raw_expr); -static Value make_string_value_struct(char *str); +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ +static String make_string_value_struct(char *str); +static Integer make_int_value_struct(int int_val); +#else +static Value make_string_value_struct(char* str); static Value make_int_value_struct(int int_val); +#endif static Node *build_partitioning_expression(Oid parent_relid, Oid *expr_type, @@ -106,6 +124,23 @@ create_single_range_partition_internal(Oid parent_relid, init_callback_params callback_params; List *trigger_columns = NIL; Node *expr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + + /* + * Sanity check. Probably needed only if some absurd init_callback + * decides to drop the table while we are creating partitions. + * It seems much better to use prel cache here, but this doesn't work + * because it regards tables with no partitions as not partitioned at all + * (build_pathman_relation_info returns NULL), and if I comment out that, + * tests fail for not immediately obvious reasons. Don't want to dig + * into this now. 
+ */ + if (!pathman_config_contains_relation(parent_relid, values, isnull, NULL, NULL)) + { + elog(ERROR, "Can't create range partition: relid %u doesn't exist or not partitioned", parent_relid); + } /* Generate a name if asked to */ if (!partition_rv) @@ -217,29 +252,18 @@ create_single_partition_common(Oid parent_relid, init_callback_params *callback_params, List *trigger_columns) { - Relation child_relation; + Relation child_relation; /* Open the relation and add new check constraint & fkeys */ - child_relation = heap_open(partition_relid, AccessExclusiveLock); - AddRelationNewConstraints(child_relation, NIL, - list_make1(check_constraint), - false, true, true); - heap_close(child_relation, NoLock); + child_relation = heap_open_compat(partition_relid, AccessExclusiveLock); + AddRelationNewConstraintsCompat(child_relation, NIL, + list_make1(check_constraint), + false, true, true); + heap_close_compat(child_relation, NoLock); /* Make constraint visible */ CommandCounterIncrement(); - /* Create trigger if needed */ - if (has_update_trigger_internal(parent_relid)) - { - const char *trigger_name; - - trigger_name = build_update_trigger_name_internal(parent_relid); - create_single_update_trigger_internal(partition_relid, - trigger_name, - trigger_columns); - } - /* Make trigger visible */ CommandCounterIncrement(); @@ -302,8 +326,7 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) elog(DEBUG2, "create_partitions(): chose backend [%u]", MyProcPid); last_partition = create_partitions_for_value_internal(relid, value, - value_type, - false); /* backend */ + value_type); } } else @@ -315,6 +338,9 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) elog(ERROR, "could not create new partitions for relation \"%s\"", get_rel_name_or_relid(relid)); + /* Make changes visible */ + AcceptInvalidationMessages(); + return last_partition; } @@ -332,135 +358,118 @@ create_partitions_for_value(Oid relid, Datum value, Oid value_type) * use 
create_partitions_for_value() instead. */ Oid -create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type, - bool is_background_worker) +create_partitions_for_value_internal(Oid relid, Datum value, Oid value_type) { - MemoryContext old_mcxt = CurrentMemoryContext; Oid partid = InvalidOid; /* last created partition (or InvalidOid) */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; - PG_TRY(); + /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) { - const PartRelationInfo *prel; - LockAcquireResult lock_result; /* could we lock the parent? */ - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; - - /* Get both PartRelationInfo & PATHMAN_CONFIG contents for this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) - { - Oid base_bound_type; /* base type of prel->ev_type */ - Oid base_value_type; /* base type of value_type */ - - /* Fetch PartRelationInfo by 'relid' */ - prel = get_pathman_relation_info_after_lock(relid, true, &lock_result); - shout_if_prel_is_invalid(relid, prel, PT_RANGE); - - /* Fetch base types of prel->ev_type & value_type */ - base_bound_type = getBaseType(prel->ev_type); - base_value_type = getBaseType(value_type); + PartRelationInfo *prel; + LockAcquireResult lock_result; /* could we lock the parent? 
*/ + Oid base_bound_type; /* base type of prel->ev_type */ + Oid base_value_type; /* base type of value_type */ - /* Search for a suitable partition if we didn't hold it */ - Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); - if (lock_result == LOCKACQUIRE_OK) - { - Oid *parts; - int nparts; + /* Prevent modifications of partitioning scheme */ + lock_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); - /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); + /* Fetch PartRelationInfo by 'relid' */ + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_RANGE); - /* Shout if there's more than one */ - if (nparts > 1) - elog(ERROR, ERR_PART_ATTR_MULTIPLE); + /* Fetch base types of prel->ev_type & value_type */ + base_bound_type = getBaseType(prel->ev_type); + base_value_type = getBaseType(value_type); - /* It seems that we got a partition! */ - else if (nparts == 1) - { - /* Unlock the parent (we're not going to spawn) */ - UnlockRelationOid(relid, ShareUpdateExclusiveLock); + /* + * Search for a suitable partition if we didn't hold it, + * since somebody might have just created it for us. + * + * If the table is locked, it means that we've + * already failed to find a suitable partition + * and called this function to do the job. + */ + Assert(lock_result != LOCKACQUIRE_NOT_AVAIL); + if (lock_result == LOCKACQUIRE_OK) + { + Oid *parts; + int nparts; - /* Simply return the suitable partition */ - partid = parts[0]; - } + /* Search for matching partitions */ + parts = find_partitions_for_value(value, value_type, prel, &nparts); - /* Don't forget to free */ - pfree(parts); - } + /* Shout if there's more than one */ + if (nparts > 1) + elog(ERROR, ERR_PART_ATTR_MULTIPLE); - /* Else spawn a new one (we hold a lock on the parent) */ - if (partid == InvalidOid) + /* It seems that we got a partition! 
*/ + else if (nparts == 1) { - RangeEntry *ranges = PrelGetRangesArray(prel); - Bound bound_min, /* absolute MIN */ - bound_max; /* absolute MAX */ + /* Unlock the parent (we're not going to spawn) */ + UnlockRelationOid(relid, ShareUpdateExclusiveLock); - Oid interval_type = InvalidOid; - Datum interval_binary, /* assigned 'width' of one partition */ - interval_text; + /* Simply return the suitable partition */ + partid = parts[0]; + } - /* Copy datums in order to protect them from cache invalidation */ - bound_min = CopyBound(&ranges[0].min, - prel->ev_byval, - prel->ev_len); + /* Don't forget to free */ + pfree(parts); + } - bound_max = CopyBound(&ranges[PrelLastChild(prel)].max, - prel->ev_byval, - prel->ev_len); + /* Else spawn a new one (we hold a lock on the parent) */ + if (partid == InvalidOid) + { + RangeEntry *ranges = PrelGetRangesArray(prel); + Bound bound_min, /* absolute MIN */ + bound_max; /* absolute MAX */ - /* Check if interval is set */ - if (isnull[Anum_pathman_config_range_interval - 1]) - { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot spawn new partition for key '%s'", - datum_to_cstring(value, value_type)), - errdetail("default range interval is NULL"))); - } + Oid interval_type = InvalidOid; + Datum interval_binary, /* assigned 'width' of one partition */ + interval_text; - /* Retrieve interval as TEXT from tuple */ - interval_text = values[Anum_pathman_config_range_interval - 1]; + /* Copy datums in order to protect them from cache invalidation */ + bound_min = CopyBound(&ranges[0].min, + prel->ev_byval, + prel->ev_len); - /* Convert interval to binary representation */ - interval_binary = extract_binary_interval_from_text(interval_text, - base_bound_type, - &interval_type); + bound_max = CopyBound(&ranges[PrelLastChild(prel)].max, + prel->ev_byval, + prel->ev_len); - /* At last, spawn partitions to store the value */ - partid = spawn_partitions_val(PrelParentRelid(prel), - &bound_min, &bound_max, 
base_bound_type, - interval_binary, interval_type, - value, base_value_type, - prel->ev_collid); + /* Check if interval is set */ + if (isnull[Anum_pathman_config_range_interval - 1]) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot spawn new partition for key '%s'", + datum_to_cstring(value, value_type)), + errdetail("default range interval is NULL"))); } - } - else - elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(relid)); - } - PG_CATCH(); - { - ErrorData *error; - - /* Simply rethrow ERROR if we're in backend */ - if (!is_background_worker) - PG_RE_THROW(); - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - error = CopyErrorData(); - FlushErrorState(); + /* Retrieve interval as TEXT from tuple */ + interval_text = values[Anum_pathman_config_range_interval - 1]; - /* Produce log message if we're in BGW */ - error->elevel = LOG; - error->message = psprintf(CppAsString(create_partitions_for_value_internal) - ": %s [%u]", error->message, MyProcPid); + /* Convert interval to binary representation */ + interval_binary = extract_binary_interval_from_text(interval_text, + base_bound_type, + &interval_type); - ReThrowError(error); + /* At last, spawn partitions to store the value */ + partid = spawn_partitions_val(PrelParentRelid(prel), + &bound_min, &bound_max, base_bound_type, + interval_binary, interval_type, + value, base_value_type, + prel->ev_collid); + } - /* Reset 'partid' in case of error */ - partid = InvalidOid; + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); } - PG_END_TRY(); + else + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(relid)); return partid; } @@ -568,6 +577,15 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ check_lt(&cmp_value_bound_finfo, collid, value, cur_leading_bound)) { Bound bounds[2]; + int rc; + bool isnull; + char *create_sql; + HeapTuple typeTuple; + char *typname; + Oid parent_nsp = get_rel_namespace(parent_relid); + char *parent_nsp_name = get_namespace_name(parent_nsp); + char *partition_name = choose_range_partition_name(parent_relid, parent_nsp); + char *pathman_schema; /* Assign the 'following' boundary to current 'leading' value */ cur_following_bound = cur_leading_bound; @@ -580,10 +598,50 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ bounds[0] = MakeBound(should_append ? cur_following_bound : cur_leading_bound); bounds[1] = MakeBound(should_append ? cur_leading_bound : cur_following_bound); - last_partition = create_single_range_partition_internal(parent_relid, - &bounds[0], &bounds[1], - range_bound_type, - NULL, NULL); + /* + * Instead of directly calling create_single_range_partition_internal() + * we are going to call it through SPI, to make it possible for various + * DDL-replicating extensions to catch that call and do something about + * it. 
--sk + */ + + /* Get typname of range_bound_type to perform cast */ + typeTuple = SearchSysCache1(TYPEOID, ObjectIdGetDatum(range_bound_type)); + if (!HeapTupleIsValid(typeTuple)) + elog(ERROR, "cache lookup failed for type %u", range_bound_type); + typname = pstrdup(NameStr(((Form_pg_type) GETSTRUCT(typeTuple))->typname)); + ReleaseSysCache(typeTuple); + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + + /* Construct call to create_single_range_partition() */ + create_sql = psprintf( + "select %s.create_single_range_partition('%s.%s'::regclass, '%s'::%s, '%s'::%s, '%s.%s', NULL::text)", + quote_identifier(pathman_schema), + quote_identifier(parent_nsp_name), + quote_identifier(get_rel_name(parent_relid)), + IsInfinite(&bounds[0]) ? "NULL" : datum_to_cstring(bounds[0].value, range_bound_type), + typname, + IsInfinite(&bounds[1]) ? "NULL" : datum_to_cstring(bounds[1].value, range_bound_type), + typname, + quote_identifier(parent_nsp_name), + quote_identifier(partition_name) + ); + + /* ...and call it. 
*/ + SPI_connect(); + PushActiveSnapshot(GetTransactionSnapshot()); + rc = SPI_execute(create_sql, false, 0); + if (rc <= 0 || SPI_processed != 1) + elog(ERROR, "Failed to create range partition"); + last_partition = DatumGetObjectId(SPI_getbinval(SPI_tuptable->vals[0], + SPI_tuptable->tupdesc, + 1, &isnull)); + Assert(!isnull); + SPI_finish(); + PopActiveSnapshot(); #ifdef USE_ASSERT_CHECKING elog(DEBUG2, "%s partition with following='%s' & leading='%s' [%u]", @@ -601,22 +659,31 @@ spawn_partitions_val(Oid parent_relid, /* parent's Oid */ static char * choose_range_partition_name(Oid parent_relid, Oid parent_nsp) { - Datum part_num; - Oid part_seq_relid; - char *part_seq_relname; - Oid save_userid; - int save_sec_context; - bool need_priv_escalation = !superuser(); /* we might be a SU */ - char *relname; - int attempts_cnt = 1000; - - part_seq_relname = build_sequence_name_internal(parent_relid); - part_seq_relid = get_relname_relid(part_seq_relname, parent_nsp); + Datum part_num; + Oid part_seq_relid; + char *part_seq_nspname, + *part_seq_relname; + RangeVar *part_seq_rv; + Oid save_userid; + int save_sec_context; + bool need_priv_escalation = !superuser(); /* we might be a SU */ + char *relname; + int attempts_cnt = 1000; + + /* Dispatch sequence and lock it using AccessShareLock */ + part_seq_nspname = get_namespace_name(get_rel_namespace(parent_relid)); + part_seq_relname = build_sequence_name_relid_internal(parent_relid); + part_seq_rv = makeRangeVar(part_seq_nspname, part_seq_relname, -1); + part_seq_relid = RangeVarGetRelid(part_seq_rv, AccessShareLock, true); /* Could not find part number generating sequence */ if (!OidIsValid(part_seq_relid)) elog(ERROR, "auto naming sequence \"%s\" does not exist", part_seq_relname); + pfree(part_seq_nspname); + pfree(part_seq_relname); + pfree(part_seq_rv); + /* Do we have to escalate privileges? 
*/ if (need_priv_escalation) { @@ -639,7 +706,7 @@ choose_range_partition_name(Oid parent_relid, Oid parent_nsp) (uint64) DatumGetInt64(part_num)); /* can't use UInt64 on 9.5 */ /* - * If we found a unique name or attemps number exceeds some reasonable + * If we found a unique name or attempts number exceeds some reasonable * value then we quit * * XXX Should we throw an exception if max attempts number is reached? @@ -730,8 +797,6 @@ create_single_partition_internal(Oid parent_relid, /* Make up parent's RangeVar */ parent_rv = makeRangeVar(parent_nsp_name, parent_name, -1); - Assert(partition_rv); - /* If no 'tablespace' is provided, get parent's tablespace */ if (!tablespace) tablespace = get_tablespace_name(get_rel_tablespace(parent_relid)); @@ -754,13 +819,16 @@ create_single_partition_internal(Oid parent_relid, create_stmt.oncommit = ONCOMMIT_NOOP; create_stmt.tablespacename = tablespace; create_stmt.if_not_exists = false; -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 90600 - create_stmt.partition_info = NULL; -#endif #if PG_VERSION_NUM >= 100000 create_stmt.partbound = NULL; create_stmt.partspec = NULL; #endif +#if defined(PGPRO_EE) && PG_VERSION_NUM < 100000 + create_stmt.partition_info = NULL; +#endif +#if PG_VERSION_NUM >= 120000 + create_stmt.accessMethod = NULL; +#endif /* Obtain the sequence of Stmts to create partition and link it to parent */ create_stmts = transformCreateStmt(&create_stmt, NULL); @@ -784,6 +852,9 @@ create_single_partition_internal(Oid parent_relid, partition_relid = create_table_using_stmt((CreateStmt *) cur_stmt, child_relowner).objectId; + /* Copy attributes to partition */ + copy_rel_options(parent_relid, partition_relid); + /* Copy FOREIGN KEYS of the parent table */ copy_foreign_keys(parent_relid, partition_relid); @@ -797,6 +868,37 @@ create_single_partition_internal(Oid parent_relid, { elog(ERROR, "FDW partition creation is not implemented yet"); } + /* + * 3737965249cd fix (since 12.5, 11.10, etc) reworked LIKE handling + * to 
process it after DefineRelation. + */ +#if (PG_VERSION_NUM >= 130000) || \ + ((PG_VERSION_NUM < 130000) && (PG_VERSION_NUM >= 120005)) || \ + ((PG_VERSION_NUM < 120000) && (PG_VERSION_NUM >= 110010)) || \ + ((PG_VERSION_NUM < 110000) && (PG_VERSION_NUM >= 100015)) || \ + ((PG_VERSION_NUM < 100000) && (PG_VERSION_NUM >= 90620)) || \ + ((PG_VERSION_NUM < 90600) && (PG_VERSION_NUM >= 90524)) + else if (IsA(cur_stmt, TableLikeClause)) + { + /* + * Do delayed processing of LIKE options. This + * will result in additional sub-statements for us + * to process. We can just tack those onto the + * to-do list. + */ + TableLikeClause *like = (TableLikeClause *) cur_stmt; + RangeVar *rv = create_stmt.relation; + List *morestmts; + + morestmts = expandTableLikeClause(rv, like); + create_stmts = list_concat(create_stmts, morestmts); + + /* + * We don't need a CCI now + */ + continue; + } +#endif else { /* @@ -896,17 +998,17 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) Snapshot snapshot; /* Both parent & partition have already been locked */ - parent_rel = heap_open(parent_relid, NoLock); - partition_rel = heap_open(partition_relid, NoLock); + parent_rel = heap_open_compat(parent_relid, NoLock); + partition_rel = heap_open_compat(partition_relid, NoLock); - make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars); + make_inh_translation_list(parent_rel, partition_rel, 0, &translated_vars, NULL); - heap_close(parent_rel, NoLock); - heap_close(partition_rel, NoLock); + heap_close_compat(parent_rel, NoLock); + heap_close_compat(partition_rel, NoLock); /* Open catalog's relations */ - pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock); - pg_attribute_rel = heap_open(AttributeRelationId, RowExclusiveLock); + pg_class_rel = heap_open_compat(RelationRelationId, RowExclusiveLock); + pg_attribute_rel = heap_open_compat(AttributeRelationId, RowExclusiveLock); /* Get most recent snapshot */ snapshot = 
RegisterSnapshot(GetLatestSnapshot()); @@ -926,8 +1028,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) { Form_pg_attribute acl_column; - acl_column = pg_class_desc->attrs[Anum_pg_class_relacl - 1]; - + acl_column = TupleDescAttr(pg_class_desc, Anum_pg_class_relacl - 1); acl_datum = datumCopy(acl_datum, acl_column->attbyval, acl_column->attlen); } @@ -936,7 +1037,11 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) /* Search for 'partition_relid' */ ScanKeyInit(&skey[0], +#if PG_VERSION_NUM >= 120000 + Anum_pg_class_oid, +#else ObjectIdAttributeNumber, +#endif BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(partition_relid)); @@ -1003,7 +1108,7 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) { Form_pg_attribute acl_column; - acl_column = pg_attribute_desc->attrs[Anum_pg_attribute_attacl - 1]; + acl_column = TupleDescAttr(pg_attribute_desc, Anum_pg_attribute_attacl - 1); acl_datum = datumCopy(acl_datum, acl_column->attbyval, @@ -1074,22 +1179,29 @@ postprocess_child_table_and_atts(Oid parent_relid, Oid partition_relid) /* Don't forget to free snapshot */ UnregisterSnapshot(snapshot); - heap_close(pg_class_rel, RowExclusiveLock); - heap_close(pg_attribute_rel, RowExclusiveLock); + heap_close_compat(pg_class_rel, RowExclusiveLock); + heap_close_compat(pg_attribute_rel, RowExclusiveLock); } -/* Copy foreign keys of parent table */ +/* Copy foreign keys of parent table (updates pg_class) */ static void copy_foreign_keys(Oid parent_relid, Oid partition_oid) { Oid copy_fkeys_proc_args[] = { REGCLASSOID, REGCLASSOID }; List *copy_fkeys_proc_name; FmgrInfo copy_fkeys_proc_flinfo; - FunctionCallInfoData copy_fkeys_proc_fcinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(copy_fkeys_proc_fcinfo, 2); +#else + FunctionCallInfoData copy_fkeys_proc_fcinfo_data; + FunctionCallInfo copy_fkeys_proc_fcinfo = ©_fkeys_proc_fcinfo_data; +#endif char *pathman_schema; /* Fetch pg_pathman's schema */ 
pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* Build function's name */ copy_fkeys_proc_name = list_make2(makeString(pathman_schema), @@ -1100,15 +1212,87 @@ copy_foreign_keys(Oid parent_relid, Oid partition_oid) copy_fkeys_proc_args, false), ©_fkeys_proc_flinfo); - InitFunctionCallInfoData(copy_fkeys_proc_fcinfo, ©_fkeys_proc_flinfo, + InitFunctionCallInfoData(*copy_fkeys_proc_fcinfo, ©_fkeys_proc_flinfo, 2, InvalidOid, NULL, NULL); - copy_fkeys_proc_fcinfo.arg[0] = ObjectIdGetDatum(parent_relid); - copy_fkeys_proc_fcinfo.argnull[0] = false; - copy_fkeys_proc_fcinfo.arg[1] = ObjectIdGetDatum(partition_oid); - copy_fkeys_proc_fcinfo.argnull[1] = false; +#if PG_VERSION_NUM >= 120000 + copy_fkeys_proc_fcinfo->args[0].value = ObjectIdGetDatum(parent_relid); + copy_fkeys_proc_fcinfo->args[0].isnull = false; + copy_fkeys_proc_fcinfo->args[1].value = ObjectIdGetDatum(partition_oid); + copy_fkeys_proc_fcinfo->args[1].isnull = false; +#else + copy_fkeys_proc_fcinfo->arg[0] = ObjectIdGetDatum(parent_relid); + copy_fkeys_proc_fcinfo->argnull[0] = false; + copy_fkeys_proc_fcinfo->arg[1] = ObjectIdGetDatum(partition_oid); + copy_fkeys_proc_fcinfo->argnull[1] = false; +#endif /* Invoke the callback */ - FunctionCallInvoke(©_fkeys_proc_fcinfo); + FunctionCallInvoke(copy_fkeys_proc_fcinfo); + + /* Make changes visible */ + CommandCounterIncrement(); +} + +/* Copy reloptions of foreign table (updates pg_class) */ +static void +copy_rel_options(Oid parent_relid, Oid partition_relid) +{ + Relation pg_class_rel; + + HeapTuple parent_htup, + partition_htup, + new_htup; + + Datum reloptions; + bool reloptions_null; + Datum relpersistence; + + Datum values[Natts_pg_class]; + bool isnull[Natts_pg_class], + replace[Natts_pg_class] = { false }; + + pg_class_rel = heap_open_compat(RelationRelationId, RowExclusiveLock); + + parent_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(parent_relid)); + 
partition_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(partition_relid)); + + if (!HeapTupleIsValid(parent_htup)) + elog(ERROR, "cache lookup failed for relation %u", parent_relid); + + if (!HeapTupleIsValid(partition_htup)) + elog(ERROR, "cache lookup failed for relation %u", partition_relid); + + /* Extract parent's reloptions */ + reloptions = SysCacheGetAttr(RELOID, parent_htup, + Anum_pg_class_reloptions, + &reloptions_null); + + /* Extract parent's relpersistence */ + relpersistence = ((Form_pg_class) GETSTRUCT(parent_htup))->relpersistence; + + /* Fill in reloptions */ + values[Anum_pg_class_reloptions - 1] = reloptions; + isnull[Anum_pg_class_reloptions - 1] = reloptions_null; + replace[Anum_pg_class_reloptions - 1] = true; + + /* Fill in relpersistence */ + values[Anum_pg_class_relpersistence - 1] = relpersistence; + isnull[Anum_pg_class_relpersistence - 1] = false; + replace[Anum_pg_class_relpersistence - 1] = true; + + new_htup = heap_modify_tuple(partition_htup, + RelationGetDescr(pg_class_rel), + values, isnull, replace); + CatalogTupleUpdate(pg_class_rel, &new_htup->t_self, new_htup); + heap_freetuple(new_htup); + + ReleaseSysCache(parent_htup); + ReleaseSysCache(partition_htup); + + heap_close_compat(pg_class_rel, RowExclusiveLock); + + /* Make changes visible */ + CommandCounterIncrement(); } @@ -1123,15 +1307,21 @@ void drop_pathman_check_constraint(Oid relid) { char *constr_name; +#if PG_VERSION_NUM >= 130000 + List *cmds; +#else AlterTableStmt *stmt; +#endif AlterTableCmd *cmd; /* Build a correct name for this constraint */ constr_name = build_check_constraint_name_relid_internal(relid); +#if PG_VERSION_NUM < 130000 stmt = makeNode(AlterTableStmt); stmt->relation = makeRangeVarFromRelid(relid); stmt->relkind = OBJECT_TABLE; +#endif cmd = makeNode(AlterTableCmd); cmd->subtype = AT_DropConstraint; @@ -1139,23 +1329,35 @@ drop_pathman_check_constraint(Oid relid) cmd->behavior = DROP_RESTRICT; cmd->missing_ok = true; +#if PG_VERSION_NUM >= 130000 
+ cmds = list_make1(cmd); + + /* + * Since 1281a5c907b AlterTable() was changed. + * recurse = true (see stmt->relation->inh makeRangeVarFromRelid() makeRangeVar()) + * Dropping constraint won't do parse analyze, so AlterTableInternal + * is enough. + */ + AlterTableInternal(relid, cmds, true); +#else stmt->cmds = list_make1(cmd); /* See function AlterTableGetLockLevel() */ AlterTable(relid, AccessExclusiveLock, stmt); +#endif } /* Add pg_pathman's check constraint using 'relid' */ void add_pathman_check_constraint(Oid relid, Constraint *constraint) { - Relation part_rel = heap_open(relid, AccessExclusiveLock); + Relation part_rel = heap_open_compat(relid, AccessExclusiveLock); - AddRelationNewConstraints(part_rel, NIL, - list_make1(constraint), - false, true, true); + AddRelationNewConstraintsCompat(part_rel, NIL, + list_make1(constraint), + false, true, true); - heap_close(part_rel, NoLock); + heap_close_compat(part_rel, NoLock); } @@ -1167,12 +1369,21 @@ build_raw_range_check_tree(Node *raw_expression, const Bound *end_value, Oid value_type) { +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ +#define BuildConstExpr(node, value, value_type) \ + do { \ + (node)->val.sval = make_string_value_struct( \ + datum_to_cstring((value), (value_type))); \ + (node)->location = -1; \ + } while (0) +#else #define BuildConstExpr(node, value, value_type) \ do { \ (node)->val = make_string_value_struct( \ datum_to_cstring((value), (value_type))); \ (node)->location = -1; \ } while (0) +#endif #define BuildCmpExpr(node, opname, expr, c) \ do { \ @@ -1223,7 +1434,7 @@ build_raw_range_check_tree(Node *raw_expression, and_oper->args = lappend(and_oper->args, left_arg); } - /* Right comparision (VAR < end_value) */ + /* Right comparison (VAR < end_value) */ if (!IsInfinite(end_value)) { /* Build right boundary */ @@ -1289,56 +1500,57 @@ check_range_available(Oid parent_relid, Oid value_type, bool raise_error) { - const PartRelationInfo *prel; - RangeEntry *ranges; - 
FmgrInfo cmp_func; - uint32 i; + PartRelationInfo *prel; + bool result = true; /* Try fetching the PartRelationInfo structure */ - prel = get_pathman_relation_info(parent_relid); - - /* If there's no prel, return TRUE (overlap is not possible) */ - if (!prel) + if ((prel = get_pathman_relation_info(parent_relid)) != NULL) { - ereport(WARNING, (errmsg("table \"%s\" is not partitioned", - get_rel_name_or_relid(parent_relid)))); - return true; - } + RangeEntry *ranges; + FmgrInfo cmp_func; + uint32 i; - /* Emit an error if it is not partitioned by RANGE */ - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + /* Emit an error if it is not partitioned by RANGE */ + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); - /* Fetch comparison function */ - fill_type_cmp_fmgr_info(&cmp_func, - getBaseType(value_type), - getBaseType(prel->ev_type)); + /* Fetch comparison function */ + fill_type_cmp_fmgr_info(&cmp_func, + getBaseType(value_type), + getBaseType(prel->ev_type)); - ranges = PrelGetRangesArray(prel); - for (i = 0; i < PrelChildrenCount(prel); i++) - { - int c1, c2; + ranges = PrelGetRangesArray(prel); + for (i = 0; i < PrelChildrenCount(prel); i++) + { + int c1, c2; - c1 = cmp_bounds(&cmp_func, prel->ev_collid, start, &ranges[i].max); - c2 = cmp_bounds(&cmp_func, prel->ev_collid, end, &ranges[i].min); + c1 = cmp_bounds(&cmp_func, prel->ev_collid, start, &ranges[i].max); + c2 = cmp_bounds(&cmp_func, prel->ev_collid, end, &ranges[i].min); - /* There's something! */ - if (c1 < 0 && c2 > 0) - { - if (raise_error) - elog(ERROR, "specified range [%s, %s) overlaps " - "with existing partitions", - IsInfinite(start) ? - "NULL" : - datum_to_cstring(BoundGetValue(start), value_type), - IsInfinite(end) ? - "NULL" : - datum_to_cstring(BoundGetValue(end), value_type)); - - else return false; + /* There's something! 
*/ + if (c1 < 0 && c2 > 0) + { + if (raise_error) + { + elog(ERROR, "specified range [%s, %s) overlaps " + "with existing partitions", + BoundToCString(start, value_type), + BoundToCString(end, value_type)); + } + /* Too bad, so sad */ + else result = false; + } } + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + } + else + { + ereport(WARNING, (errmsg("table \"%s\" is not partitioned", + get_rel_name_or_relid(parent_relid)))); } - return true; + return result; } /* Build HASH check constraint expression tree */ @@ -1359,16 +1571,25 @@ build_raw_hash_check_tree(Node *raw_expression, Oid hash_proc; TypeCacheEntry *tce; + char *pathman_schema; tce = lookup_type_cache(value_type, TYPECACHE_HASH_PROC); hash_proc = tce->hash_proc; /* Total amount of partitions */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ + part_count_c->val.ival = make_int_value_struct(part_count); +#else part_count_c->val = make_int_value_struct(part_count); +#endif part_count_c->location = -1; /* Index of this partition (hash % total amount) */ +#if PG_VERSION_NUM >= 150000 /* for commit 639a86e36aae */ + part_idx_c->val.ival = make_int_value_struct(part_idx); +#else part_idx_c->val = make_int_value_struct(part_idx); +#endif part_idx_c->location = -1; /* Call hash_proc() */ @@ -1383,9 +1604,13 @@ build_raw_hash_check_tree(Node *raw_expression, hash_call->over = NULL; hash_call->location = -1; + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Build schema-qualified name of function get_hash_part_idx() */ get_hash_part_idx_proc = - list_make2(makeString(get_namespace_name(get_pathman_schema())), + list_make2(makeString(pathman_schema), makeString("get_hash_part_idx")); /* Call get_hash_part_idx() */ @@ -1459,6 +1684,29 @@ make_constraint_common(char *name, Node *raw_expr) return constraint; } +#if PG_VERSION_NUM >= 150000 /* for commits 639a86e36aae, 
c4cc2850f4d1 */ +static String +make_string_value_struct(char* str) +{ + String val; + + val.type = T_String; + val.sval = str; + + return val; +} + +static Integer +make_int_value_struct(int int_val) +{ + Integer val; + + val.type = T_Integer; + val.ival = int_val; + + return val; +} +#else static Value make_string_value_struct(char *str) { @@ -1480,7 +1728,7 @@ make_int_value_struct(int int_val) return val; } - +#endif /* PG_VERSION_NUM >= 150000 */ /* * --------------------- @@ -1513,7 +1761,12 @@ invoke_init_callback_internal(init_callback_params *cb_params) Oid partition_oid = cb_params->partition_relid; FmgrInfo cb_flinfo; - FunctionCallInfoData cb_fcinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(cb_fcinfo, 1); +#else + FunctionCallInfoData cb_fcinfo_data; + FunctionCallInfo cb_fcinfo = &cb_fcinfo_data; +#endif JsonbParseState *jsonb_state = NULL; JsonbValue *result, @@ -1602,15 +1855,15 @@ invoke_init_callback_internal(init_callback_params *cb_params) *end_value = NULL; Bound sv_datum = cb_params->params.range_params.start_value, ev_datum = cb_params->params.range_params.end_value; - Oid type = cb_params->params.range_params.value_type; + Oid value_type = cb_params->params.range_params.value_type; /* Convert min to CSTRING */ if (!IsInfinite(&sv_datum)) - start_value = datum_to_cstring(BoundGetValue(&sv_datum), type); + start_value = BoundToCString(&sv_datum, value_type); /* Convert max to CSTRING */ if (!IsInfinite(&ev_datum)) - end_value = datum_to_cstring(BoundGetValue(&ev_datum), type); + end_value = BoundToCString(&ev_datum, value_type); pushJsonbValue(&jsonb_state, WJB_BEGIN_OBJECT, NULL); @@ -1645,12 +1898,17 @@ invoke_init_callback_internal(init_callback_params *cb_params) /* Fetch function call data */ fmgr_info(cb_params->callback, &cb_flinfo); - InitFunctionCallInfoData(cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); - cb_fcinfo.arg[0] = PointerGetDatum(JsonbValueToJsonb(result)); - cb_fcinfo.argnull[0] = false; + 
InitFunctionCallInfoData(*cb_fcinfo, &cb_flinfo, 1, InvalidOid, NULL, NULL); +#if PG_VERSION_NUM >= 120000 + cb_fcinfo->args[0].value = PointerGetDatum(JsonbValueToJsonb(result)); + cb_fcinfo->args[0].isnull = false; +#else + cb_fcinfo->arg[0] = PointerGetDatum(JsonbValueToJsonb(result)); + cb_fcinfo->argnull[0] = false; +#endif /* Invoke the callback */ - FunctionCallInvoke(&cb_fcinfo); + FunctionCallInvoke(cb_fcinfo); } /* Invoke a callback of a specified type */ @@ -1714,19 +1972,28 @@ validate_part_callback(Oid procid, bool emit_error) static Oid text_to_regprocedure(text *proc_signature) { - FunctionCallInfoData fcinfo; +#if PG_VERSION_NUM >= 120000 + LOCAL_FCINFO(fcinfo, 1); +#else + FunctionCallInfoData fcinfo_data; + FunctionCallInfo fcinfo = &fcinfo_data; +#endif Datum result; - InitFunctionCallInfoData(fcinfo, NULL, 1, InvalidOid, NULL, NULL); + InitFunctionCallInfoData(*fcinfo, NULL, 1, InvalidOid, NULL, NULL); -#if PG_VERSION_NUM >= 90600 - fcinfo.arg[0] = PointerGetDatum(proc_signature); +#if PG_VERSION_NUM >= 120000 + fcinfo->args[0].value = PointerGetDatum(proc_signature); + fcinfo->args[0].isnull = false; +#elif PG_VERSION_NUM >= 90600 + fcinfo->arg[0] = PointerGetDatum(proc_signature); + fcinfo->argnull[0] = false; #else - fcinfo.arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); + fcinfo->arg[0] = CStringGetDatum(text_to_cstring(proc_signature)); + fcinfo->argnull[0] = false; #endif - fcinfo.argnull[0] = false; - result = to_regprocedure(&fcinfo); + result = to_regprocedure(fcinfo); return DatumGetObjectId(result); } @@ -1769,106 +2036,22 @@ build_partitioning_expression(Oid parent_relid, expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); expr = parse_partitioning_expression(parent_relid, expr_cstr, NULL, NULL); - pfree(expr_cstr); /* We need expression type for hash functions */ if (expr_type) { - char *expr_p_cstr; - - /* We can safely assume that this field will always remain not null */ - 
Assert(!isnull[Anum_pathman_config_cooked_expr - 1]); - expr_p_cstr = - TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); - /* Finally return expression type */ - *expr_type = exprType(stringToNode(expr_p_cstr)); + *expr_type = exprType( + cook_partitioning_expression(parent_relid, expr_cstr, NULL)); } if (columns) { /* Column list should be empty */ - AssertArg(*columns == NIL); + Assert(*columns == NIL); extract_column_names(expr, columns); } + pfree(expr_cstr); return expr; } - -/* - * ------------------------- - * Update trigger creation - * ------------------------- - */ - -/* Create trigger for partition */ -void -create_single_update_trigger_internal(Oid partition_relid, - const char *trigname, - List *columns) -{ - CreateTrigStmt *stmt; - List *func; - - func = list_make2(makeString(get_namespace_name(get_pathman_schema())), - makeString(CppAsString(pathman_update_trigger_func))); - - stmt = makeNode(CreateTrigStmt); - stmt->trigname = (char *) trigname; - stmt->relation = makeRangeVarFromRelid(partition_relid); - stmt->funcname = func; - stmt->args = NIL; - stmt->row = true; - stmt->timing = TRIGGER_TYPE_BEFORE; - stmt->events = TRIGGER_TYPE_UPDATE; - stmt->columns = columns; - stmt->whenClause = NULL; - stmt->isconstraint = false; - stmt->deferrable = false; - stmt->initdeferred = false; - stmt->constrrel = NULL; - - (void) CreateTrigger(stmt, NULL, InvalidOid, InvalidOid, - InvalidOid, InvalidOid, false); -} - -/* Check if relation has pg_pathman's update trigger */ -bool -has_update_trigger_internal(Oid parent_relid) -{ - bool res = false; - Relation tgrel; - SysScanDesc scan; - ScanKeyData key[1]; - HeapTuple tuple; - const char *trigname; - - /* Build update trigger's name */ - trigname = build_update_trigger_name_internal(parent_relid); - - tgrel = heap_open(TriggerRelationId, RowExclusiveLock); - - ScanKeyInit(&key[0], - Anum_pg_trigger_tgrelid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(parent_relid)); - - scan = 
systable_beginscan(tgrel, TriggerRelidNameIndexId, - true, NULL, lengthof(key), key); - - while (HeapTupleIsValid(tuple = systable_getnext(scan))) - { - Form_pg_trigger trigger = (Form_pg_trigger) GETSTRUCT(tuple); - - if (namestrcmp(&(trigger->tgname), trigname) == 0) - { - res = true; - break; - } - } - - systable_endscan(scan); - heap_close(tgrel, RowExclusiveLock); - - return res; -} diff --git a/src/partition_filter.c b/src/partition_filter.c index f0edf76d..3d5e4bd3 100644 --- a/src/partition_filter.c +++ b/src/partition_filter.c @@ -3,7 +3,7 @@ * partition_filter.c * Select partition for INSERT operation * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -14,12 +14,22 @@ #include "pathman.h" #include "partition_creation.h" #include "partition_filter.h" +#include "partition_router.h" #include "utils.h" +#include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif +#include "access/xact.h" +#include "catalog/pg_class.h" #include "catalog/pg_type.h" #include "foreign/fdwapi.h" #include "foreign/foreign.h" #include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif #include "rewrite/rewriteManip.h" #include "utils/guc.h" #include "utils/memutils.h" @@ -68,25 +78,24 @@ CustomScanMethods partition_filter_plan_methods; CustomExecMethods partition_filter_exec_methods; -static void prepare_rri_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); -static void prepare_rri_returning_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); -static void prepare_rri_fdw_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); +static ExprState 
*prepare_expr_state(const PartRelationInfo *prel, + Relation source_rel, + EState *estate); + +static void prepare_rri_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + +static void prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + +static void prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + static Node *fix_returning_list_mutator(Node *node, void *state); -static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte); +static Index append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel); static int append_rri_to_estate(EState *estate, ResultRelInfo *rri); -static List * pfilter_build_tlist(Relation parent_rel, List *tlist); - static void pf_memcxt_callback(void *arg); static estate_mod_data * fetch_estate_mod_data(EState *estate); @@ -94,10 +103,10 @@ static estate_mod_data * fetch_estate_mod_data(EState *estate); void init_partition_filter_static_data(void) { - partition_filter_plan_methods.CustomName = "PartitionFilter"; + partition_filter_plan_methods.CustomName = INSERT_NODE_NAME; partition_filter_plan_methods.CreateCustomScanState = partition_filter_create_scan_state; - partition_filter_exec_methods.CustomName = "PartitionFilter"; + partition_filter_exec_methods.CustomName = INSERT_NODE_NAME; partition_filter_exec_methods.BeginCustomScan = partition_filter_begin; partition_filter_exec_methods.ExecCustomScan = partition_filter_exec; partition_filter_exec_methods.EndCustomScan = partition_filter_end; @@ -107,7 +116,7 @@ init_partition_filter_static_data(void) partition_filter_exec_methods.ExplainCustomScan = partition_filter_explain; DefineCustomBoolVariable("pg_pathman.enable_partitionfilter", - "Enables the planner's use of PartitionFilter custom node.", + "Enables the planner's use of " INSERT_NODE_NAME " custom node.", NULL, &pg_pathman_enable_partition_filter, true, @@ -128,6 
+137,8 @@ init_partition_filter_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&partition_filter_plan_methods); } @@ -140,138 +151,170 @@ init_partition_filter_static_data(void) /* Initialize ResultPartsStorage (hash table etc) */ void init_result_parts_storage(ResultPartsStorage *parts_storage, + Oid parent_relid, + ResultRelInfo *current_rri, EState *estate, + CmdType cmd_type, + bool close_relations, bool speculative_inserts, - Size table_entry_size, - on_new_rri_holder on_new_rri_holder_cb, - void *on_new_rri_holder_cb_arg) + rri_holder_cb init_rri_holder_cb, + void *init_rri_holder_cb_arg, + rri_holder_cb fini_rri_holder_cb, + void *fini_rri_holder_cb_arg) { HASHCTL *result_rels_table_config = &parts_storage->result_rels_table_config; memset(result_rels_table_config, 0, sizeof(HASHCTL)); result_rels_table_config->keysize = sizeof(Oid); - - /* Use sizeof(ResultRelInfoHolder) if table_entry_size is 0 */ - if (table_entry_size == ResultPartsStorageStandard) - result_rels_table_config->entrysize = sizeof(ResultRelInfoHolder); - else - result_rels_table_config->entrysize = table_entry_size; + result_rels_table_config->entrysize = sizeof(ResultPartsStorage); parts_storage->result_rels_table = hash_create("ResultRelInfo storage", 10, result_rels_table_config, HASH_ELEM | HASH_BLOBS); + Assert(current_rri); + parts_storage->base_rri = current_rri; + + Assert(estate); parts_storage->estate = estate; - parts_storage->saved_rel_info = NULL; - parts_storage->on_new_rri_holder_callback = on_new_rri_holder_cb; - parts_storage->callback_arg = on_new_rri_holder_cb_arg; + /* ResultRelInfoHolder initialization callback */ + parts_storage->init_rri_holder_cb = init_rri_holder_cb; + parts_storage->init_rri_holder_cb_arg = init_rri_holder_cb_arg; + + /* ResultRelInfoHolder finalization callback */ + parts_storage->fini_rri_holder_cb = fini_rri_holder_cb; + parts_storage->fini_rri_holder_cb_arg = fini_rri_holder_cb_arg; - /* Currenly ResultPartsStorage is used 
only for INSERTs */ - parts_storage->command_type = CMD_INSERT; + Assert(cmd_type == CMD_INSERT || cmd_type == CMD_UPDATE); + parts_storage->command_type = cmd_type; parts_storage->speculative_inserts = speculative_inserts; - /* Partitions must remain locked till transaction's end */ - parts_storage->head_open_lock_mode = RowExclusiveLock; - parts_storage->heap_close_lock_mode = NoLock; + /* + * Should ResultPartsStorage do ExecCloseIndices and heap_close on + * finalization? + */ + parts_storage->close_relations = close_relations; + parts_storage->head_open_lock_mode = RowExclusiveLock; + + /* Fetch PartRelationInfo for this partitioned relation */ + parts_storage->prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, parts_storage->prel, PT_ANY); + + /* Build a partitioning expression state */ + parts_storage->prel_expr_state = prepare_expr_state(parts_storage->prel, + parts_storage->base_rri->ri_RelationDesc, + parts_storage->estate); + + /* Build expression context */ + parts_storage->prel_econtext = CreateExprContext(parts_storage->estate); } /* Free ResultPartsStorage (close relations etc) */ void -fini_result_parts_storage(ResultPartsStorage *parts_storage, bool close_rels) +fini_result_parts_storage(ResultPartsStorage *parts_storage) { HASH_SEQ_STATUS stat; ResultRelInfoHolder *rri_holder; /* ResultRelInfo holder */ - /* Close partitions and free free conversion-related stuff */ - if (close_rels) + hash_seq_init(&stat, parts_storage->result_rels_table); + while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) { - hash_seq_init(&stat, parts_storage->result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) + /* Call finalization callback if needed */ + if (parts_storage->fini_rri_holder_cb) + parts_storage->fini_rri_holder_cb(rri_holder, parts_storage); + + /* + * Close indices, unless ExecEndPlan won't do that for us (this is + * is CopyFrom which misses it, 
not usual executor run, essentially). + * Otherwise, it is always automaticaly closed; in <= 11, relcache + * refs of rris managed heap_open/close on their own, and ExecEndPlan + * closed them directly. Since 9ddef3, relcache management + * of executor was centralized; now rri refs are copies of ones in + * estate->es_relations, which are closed in ExecEndPlan. + * So we push our rel there, and it is also automatically closed. + */ + if (parts_storage->close_relations) { ExecCloseIndices(rri_holder->result_rel_info); - - heap_close(rri_holder->result_rel_info->ri_RelationDesc, - parts_storage->heap_close_lock_mode); - - /* Skip if there's no map */ - if (!rri_holder->tuple_map) - continue; - - FreeTupleDesc(rri_holder->tuple_map->indesc); - FreeTupleDesc(rri_holder->tuple_map->outdesc); - - free_conversion_map(rri_holder->tuple_map); + /* And relation itself */ + heap_close_compat(rri_holder->result_rel_info->ri_RelationDesc, + NoLock); } - } - /* Else just free conversion-related stuff */ - else - { - hash_seq_init(&stat, parts_storage->result_rels_table); - while ((rri_holder = (ResultRelInfoHolder *) hash_seq_search(&stat)) != NULL) - { - /* Skip if there's no map */ - if (!rri_holder->tuple_map) - continue; + /* Free conversion-related stuff */ + destroy_tuple_map(rri_holder->tuple_map); - FreeTupleDesc(rri_holder->tuple_map->indesc); - FreeTupleDesc(rri_holder->tuple_map->outdesc); + destroy_tuple_map(rri_holder->tuple_map_child); - free_conversion_map(rri_holder->tuple_map); - } + /* Don't forget to close 'prel'! */ + if (rri_holder->prel) + close_pathman_relation_info(rri_holder->prel); } /* Finally destroy hash table */ hash_destroy(parts_storage->result_rels_table); + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(parts_storage->prel); } /* Find a ResultRelInfo for the partition using ResultPartsStorage */ ResultRelInfoHolder * -scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) +scan_result_parts_storage(EState *estate, ResultPartsStorage *parts_storage, + Oid partid) { #define CopyToResultRelInfo(field_name) \ - ( child_result_rel_info->field_name = parts_storage->saved_rel_info->field_name ) + ( child_result_rel_info->field_name = parts_storage->base_rri->field_name ) ResultRelInfoHolder *rri_holder; bool found; rri_holder = hash_search(parts_storage->result_rels_table, (const void *) &partid, - HASH_ENTER, &found); + HASH_FIND, &found); /* If not found, create & cache new ResultRelInfo */ if (!found) { Relation child_rel, - parent_rel = parts_storage->saved_rel_info->ri_RelationDesc; + base_rel; RangeTblEntry *child_rte, *parent_rte; Index child_rte_idx; ResultRelInfo *child_result_rel_info; List *translated_vars; + MemoryContext old_mcxt; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *parent_perminfo, + *child_perminfo; + /* ResultRelInfo of partitioned table. 
*/ + RangeTblEntry *init_rte; +#endif /* Lock partition and check if it exists */ LockRelationOid(partid, parts_storage->head_open_lock_mode); if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partid))) { - /* Don't forget to drop invalid hash table entry */ - hash_search(parts_storage->result_rels_table, - (const void *) &partid, - HASH_REMOVE, NULL); - UnlockRelationOid(partid, parts_storage->head_open_lock_mode); return NULL; } - parent_rte = rt_fetch(parts_storage->saved_rel_info->ri_RangeTableIndex, + /* Switch to query-level mcxt for allocations */ + old_mcxt = MemoryContextSwitchTo(parts_storage->estate->es_query_cxt); + + /* Create a new cache entry for this partition */ + rri_holder = hash_search(parts_storage->result_rels_table, + (const void *) &partid, + HASH_ENTER, NULL); + + parent_rte = rt_fetch(parts_storage->base_rri->ri_RangeTableIndex, parts_storage->estate->es_range_table); - /* Open relation and check if it is a valid target */ - child_rel = heap_open(partid, NoLock); - CheckValidResultRel(child_rel, parts_storage->command_type); + /* Get base relation */ + base_rel = parts_storage->base_rri->ri_RelationDesc; - /* Build Var translation list for 'inserted_cols' */ - make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars); + /* Open child relation and check if it is a valid target */ + child_rel = heap_open_compat(partid, NoLock); /* Create RangeTblEntry for partition */ child_rte = makeNode(RangeTblEntry); @@ -279,6 +322,35 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) child_rte->relid = partid; child_rte->relkind = child_rel->rd_rel->relkind; child_rte->eref = parent_rte->eref; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* Build Var translation list for 'inserted_cols' */ + make_inh_translation_list(parts_storage->init_rri->ri_RelationDesc, + child_rel, 0, &translated_vars, NULL); + + /* + * Need to use ResultRelInfo of partitioned table 'init_rri' because + * 'base_rri' can be 
ResultRelInfo of partition without any + * ResultRelInfo, see expand_single_inheritance_child(). + */ + init_rte = rt_fetch(parts_storage->init_rri->ri_RangeTableIndex, + parts_storage->estate->es_range_table); + parent_perminfo = getRTEPermissionInfo(estate->es_rteperminfos, init_rte); + + child_rte->perminfoindex = 0; /* expected by addRTEPermissionInfo() */ + child_perminfo = addRTEPermissionInfo(&estate->es_rteperminfos, child_rte); + child_perminfo->requiredPerms = parent_perminfo->requiredPerms; + child_perminfo->checkAsUser = parent_perminfo->checkAsUser; + child_perminfo->insertedCols = translate_col_privs(parent_perminfo->insertedCols, + translated_vars); + child_perminfo->updatedCols = translate_col_privs(parent_perminfo->updatedCols, + translated_vars); + + /* Check permissions for one partition */ + ExecCheckOneRtePermissions(child_rte, child_perminfo, true); +#else + /* Build Var translation list for 'inserted_cols' */ + make_inh_translation_list(base_rel, child_rel, 0, &translated_vars, NULL); + child_rte->requiredPerms = parent_rte->requiredPerms; child_rte->checkAsUser = parent_rte->checkAsUser; child_rte->insertedCols = translate_col_privs(parent_rte->insertedCols, @@ -288,17 +360,14 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage *parts_storage) /* Check permissions for partition */ ExecCheckRTPerms(list_make1(child_rte), true); +#endif /* Append RangeTblEntry to estate->es_range_table */ - child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte); + child_rte_idx = append_rte_to_estate(parts_storage->estate, child_rte, child_rel); /* Create ResultRelInfo for partition */ child_result_rel_info = makeNode(ResultRelInfo); - /* Check that 'saved_rel_info' is set */ - if (!parts_storage->saved_rel_info) - elog(ERROR, "ResultPartsStorage contains no saved_rel_info"); - InitResultRelInfoCompat(child_result_rel_info, child_rel, child_rte_idx, @@ -310,39 +379,108 @@ scan_result_parts_storage(Oid partid, ResultPartsStorage 
*parts_storage) /* Copy necessary fields from saved ResultRelInfo */ CopyToResultRelInfo(ri_WithCheckOptions); CopyToResultRelInfo(ri_WithCheckOptionExprs); - CopyToResultRelInfo(ri_junkFilter); CopyToResultRelInfo(ri_projectReturning); +#if PG_VERSION_NUM >= 110000 + CopyToResultRelInfo(ri_onConflict); +#else CopyToResultRelInfo(ri_onConflictSetProj); CopyToResultRelInfo(ri_onConflictSetWhere); +#endif + +#if PG_VERSION_NUM < 140000 + /* field "ri_junkFilter" removed in 86dc90056dfd */ + if (parts_storage->command_type != CMD_UPDATE) + CopyToResultRelInfo(ri_junkFilter); + else + child_result_rel_info->ri_junkFilter = NULL; +#endif /* ri_ConstraintExprs will be initialized by ExecRelCheck() */ child_result_rel_info->ri_ConstraintExprs = NULL; + /* Check that this partition is a valid result relation */ + CheckValidResultRelCompat(child_result_rel_info, + parts_storage->command_type); + /* Fill the ResultRelInfo holder */ rri_holder->partid = partid; rri_holder->result_rel_info = child_result_rel_info; - /* Generate tuple transformation map and some other stuff */ - rri_holder->tuple_map = build_part_tuple_map(parent_rel, child_rel); + /* + * Generate parent->child tuple transformation map. We need to + * convert tuples because e.g. parent's TupleDesc might have dropped + * columns which child doesn't have at all because it was created after + * the drop. + */ + rri_holder->tuple_map = build_part_tuple_map(base_rel, child_rel); - /* Call on_new_rri_holder_callback() if needed */ - if (parts_storage->on_new_rri_holder_callback) - parts_storage->on_new_rri_holder_callback(parts_storage->estate, - rri_holder, - parts_storage, - parts_storage->callback_arg); + /* + * Field for child->child tuple transformation map. We need to + * convert tuples because child TupleDesc might have extra + * columns ('ctid' etc.) and need remove them. 
+ */ + rri_holder->tuple_map_child = NULL; - /* Finally append ResultRelInfo to storage->es_alloc_result_rels */ + /* Default values */ + rri_holder->prel = NULL; + rri_holder->prel_expr_state = NULL; + + if ((rri_holder->prel = get_pathman_relation_info(partid)) != NULL) + { + rri_holder->prel_expr_state = + prepare_expr_state(rri_holder->prel, /* NOTE: this prel! */ + parts_storage->base_rri->ri_RelationDesc, + parts_storage->estate); + } + + /* Call initialization callback if needed */ + if (parts_storage->init_rri_holder_cb) + parts_storage->init_rri_holder_cb(rri_holder, parts_storage); + + /* Append ResultRelInfo to storage->es_alloc_result_rels */ append_rri_to_estate(parts_storage->estate, child_result_rel_info); + + /* Don't forget to switch back! */ + MemoryContextSwitchTo(old_mcxt); } return rri_holder; } +/* Refresh PartRelationInfo for the partition in storage */ +PartRelationInfo * +refresh_result_parts_storage(ResultPartsStorage *parts_storage, Oid partid) +{ + if (partid == PrelParentRelid(parts_storage->prel)) + { + close_pathman_relation_info(parts_storage->prel); + parts_storage->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, parts_storage->prel, PT_ANY); + + return parts_storage->prel; + } + else + { + ResultRelInfoHolder *rri_holder; + + rri_holder = hash_search(parts_storage->result_rels_table, + (const void *) &partid, + HASH_FIND, NULL); + + /* We must have entry (since we got 'prel' from it) */ + Assert(rri_holder && rri_holder->prel); + + close_pathman_relation_info(rri_holder->prel); + rri_holder->prel = get_pathman_relation_info(partid); + shout_if_prel_is_invalid(partid, rri_holder->prel, PT_ANY); + + return rri_holder->prel; + } +} /* Build tuple conversion map (e.g. 
parent has a dropped column) */ TupleConversionMap * -build_part_tuple_map(Relation parent_rel, Relation child_rel) +build_part_tuple_map(Relation base_rel, Relation child_rel) { TupleConversionMap *tuple_map; TupleDesc child_tupdesc, @@ -352,11 +490,11 @@ build_part_tuple_map(Relation parent_rel, Relation child_rel) child_tupdesc = CreateTupleDescCopy(RelationGetDescr(child_rel)); child_tupdesc->tdtypeid = InvalidOid; - parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(parent_rel)); + parent_tupdesc = CreateTupleDescCopy(RelationGetDescr(base_rel)); parent_tupdesc->tdtypeid = InvalidOid; /* Generate tuple transformation map and some other stuff */ - tuple_map = convert_tuples_by_name(parent_tupdesc, + tuple_map = convert_tuples_by_name_compat(parent_tupdesc, child_tupdesc, ERR_PART_DESC_CONVERT); @@ -370,6 +508,75 @@ build_part_tuple_map(Relation parent_rel, Relation child_rel) return tuple_map; } +/* + * Build tuple conversion map (e.g. partition tuple has extra column(s)). + * We create a special tuple map (tuple_map_child), which, when applied to the + * tuple of partition, translates the tuple attributes into the tuple + * attributes of the same partition, discarding service attributes like "ctid" + * (i.e. working like junkFilter). 
+ */ +TupleConversionMap * +build_part_tuple_map_child(Relation child_rel) +{ + TupleConversionMap *tuple_map; + TupleDesc child_tupdesc1; + TupleDesc child_tupdesc2; + int n; +#if PG_VERSION_NUM >= 130000 + AttrMap *attrMap; +#else + AttrNumber *attrMap; +#endif + + child_tupdesc1 = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc1->tdtypeid = InvalidOid; + + child_tupdesc2 = CreateTupleDescCopy(RelationGetDescr(child_rel)); + child_tupdesc2->tdtypeid = InvalidOid; + + /* Generate tuple transformation map */ +#if PG_VERSION_NUM >= 160000 /* for commit ad86d159b6ab */ + attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2, false); +#elif PG_VERSION_NUM >= 130000 + attrMap = build_attrmap_by_name(child_tupdesc1, child_tupdesc2); +#else + attrMap = convert_tuples_by_name_map(child_tupdesc1, child_tupdesc2, + ERR_PART_DESC_CONVERT); +#endif + + /* Prepare the map structure */ + tuple_map = (TupleConversionMap *) palloc(sizeof(TupleConversionMap)); + tuple_map->indesc = child_tupdesc1; + tuple_map->outdesc = child_tupdesc2; + tuple_map->attrMap = attrMap; + + /* preallocate workspace for Datum arrays */ + n = child_tupdesc1->natts; + tuple_map->outvalues = (Datum *) palloc(n * sizeof(Datum)); + tuple_map->outisnull = (bool *) palloc(n * sizeof(bool)); + + n = child_tupdesc1->natts + 1; /* +1 for NULL */ + tuple_map->invalues = (Datum *) palloc(n * sizeof(Datum)); + tuple_map->inisnull = (bool *) palloc(n * sizeof(bool)); + + tuple_map->invalues[0] = (Datum) 0; /* set up the NULL entry */ + tuple_map->inisnull[0] = true; + + return tuple_map; +} + +/* Destroy tuple conversion map */ +void +destroy_tuple_map(TupleConversionMap *tuple_map) +{ + if (tuple_map) + { + FreeTupleDesc(tuple_map->indesc); + FreeTupleDesc(tuple_map->outdesc); + + free_conversion_map(tuple_map); + } +} /* * ----------------------------------- @@ -418,61 +625,156 @@ find_partitions_for_value(Datum value, Oid value_type, * Smart wrapper for scan_result_parts_storage(). 
*/ ResultRelInfoHolder * -select_partition_for_insert(Datum value, Oid value_type, - const PartRelationInfo *prel, +select_partition_for_insert(EState *estate, ResultPartsStorage *parts_storage, - EState *estate) + TupleTableSlot *slot) { - MemoryContext old_mcxt; - ResultRelInfoHolder *rri_holder; - Oid parent_relid = PrelParentRelid(prel); - Oid selected_partid = InvalidOid; + PartRelationInfo *prel = parts_storage->prel; + ExprState *expr_state = parts_storage->prel_expr_state; + ExprContext *expr_context = parts_storage->prel_econtext; + + Oid parent_relid = PrelParentRelid(prel), + partition_relid = InvalidOid; + + Datum value; + bool isnull; + bool compute_value = true; + Oid *parts; int nparts; + ResultRelInfoHolder *result; do { + if (compute_value) + { + /* Prepare expression context */ + ResetExprContext(expr_context); + expr_context->ecxt_scantuple = slot; + + /* Execute expression */ + value = ExecEvalExprCompat(expr_state, expr_context, &isnull); + + if (isnull) + elog(ERROR, ERR_PART_ATTR_NULL); + + /* Ok, we have a value */ + compute_value = false; + } + /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); + parts = find_partitions_for_value(value, prel->ev_type, prel, &nparts); if (nparts > 1) + { elog(ERROR, ERR_PART_ATTR_MULTIPLE); + } else if (nparts == 0) { - selected_partid = create_partitions_for_value(parent_relid, - value, value_type); - - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); + partition_relid = create_partitions_for_value(parent_relid, + value, prel->ev_type); } - else selected_partid = parts[0]; + else partition_relid = parts[0]; - /* Replace parent table with a suitable partition */ - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - rri_holder = scan_result_parts_storage(selected_partid, parts_storage); - MemoryContextSwitchTo(old_mcxt); + /* Get ResultRelationInfo holder for the selected partition 
*/ + result = scan_result_parts_storage(estate, parts_storage, partition_relid); - /* This partition has been dropped, repeat with a new 'prel' */ - if (rri_holder == NULL) + /* Somebody has dropped or created partitions */ + if ((nparts == 0 || result == NULL) && !PrelIsFresh(prel)) { - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); + /* Try building a new 'prel' for this relation */ + prel = refresh_result_parts_storage(parts_storage, parent_relid); + } - /* Get a fresh PartRelationInfo */ - prel = get_pathman_relation_info(parent_relid); + /* This partition is a parent itself */ + if (result && result->prel) + { + prel = result->prel; + expr_state = result->prel_expr_state; + parent_relid = result->partid; + compute_value = true; - /* Paranoid check (all partitions have vanished) */ - if (!prel) - elog(ERROR, "table \"%s\" is not partitioned", - get_rel_name_or_relid(parent_relid)); + /* Repeat with a new dispatch */ + result = NULL; } + + Assert(prel); } /* Loop until we get some result */ - while (rri_holder == NULL); + while (result == NULL); - return rri_holder; + return result; } +/* + * Since 13 (e1551f96e64) AttrNumber[] and map_length was combined + * into one struct AttrMap + */ +static ExprState * +prepare_expr_state(const PartRelationInfo *prel, + Relation source_rel, + EState *estate) +{ + ExprState *expr_state; + MemoryContext old_mcxt; + Node *expr; + + /* Make sure we use query memory context */ + old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); + + /* Fetch partitioning expression (we don't care about varno) */ + expr = PrelExpressionForRelid(prel, PART_EXPR_VARNO); + + /* Should we try using map? 
*/ + if (PrelParentRelid(prel) != RelationGetRelid(source_rel)) + { +#if PG_VERSION_NUM >= 130000 + AttrMap *map; +#else + AttrNumber *map; + int map_length; +#endif + TupleDesc source_tupdesc = RelationGetDescr(source_rel); + + /* Remap expression attributes for source relation */ +#if PG_VERSION_NUM >= 130000 + map = PrelExpressionAttributesMap(prel, source_tupdesc); +#else + map = PrelExpressionAttributesMap(prel, source_tupdesc, &map_length); +#endif + + if (map) + { + bool found_whole_row; + +#if PG_VERSION_NUM >= 130000 + expr = map_variable_attnos(expr, PART_EXPR_VARNO, 0, map, + InvalidOid, + &found_whole_row); +#else + expr = map_variable_attnos_compat(expr, PART_EXPR_VARNO, 0, map, + map_length, InvalidOid, + &found_whole_row); +#endif + + if (found_whole_row) + elog(ERROR, "unexpected whole-row reference" + " found in partition key"); + +#if PG_VERSION_NUM >= 130000 + free_attrmap(map); +#else + pfree(map); +#endif + } + } + + /* Prepare state for expression execution */ + expr_state = ExecInitExpr((Expr *) expr, NULL); + MemoryContextSwitchTo(old_mcxt); + + return expr_state; +} /* * -------------------------------- @@ -481,42 +783,50 @@ select_partition_for_insert(Datum value, Oid value_type, */ Plan * -make_partition_filter(Plan *subplan, Oid parent_relid, +make_partition_filter(Plan *subplan, + Oid parent_relid, + Index parent_rti, OnConflictAction conflict_action, + CmdType command_type, List *returning_list) { CustomScan *cscan = makeNode(CustomScan); - Relation parent_rel; - /* Currenly we don't support ON CONFLICT clauses */ + /* Currently we don't support ON CONFLICT clauses */ if (conflict_action != ONCONFLICT_NONE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ON CONFLICT clause is not supported with partitioned tables"))); /* Copy costs etc */ - cscan->scan.plan.startup_cost = subplan->startup_cost; - cscan->scan.plan.total_cost = subplan->total_cost; - cscan->scan.plan.plan_rows = subplan->plan_rows; - 
cscan->scan.plan.plan_width = subplan->plan_width; + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; /* Setup methods and child plan */ cscan->methods = &partition_filter_plan_methods; cscan->custom_plans = list_make1(subplan); - /* Build an appropriate target list using a cached Relation entry */ - parent_rel = RelationIdGetRelation(parent_relid); - cscan->scan.plan.targetlist = pfilter_build_tlist(parent_rel, subplan->targetlist); - RelationClose(parent_rel); - /* No physical relation will be scanned */ cscan->scan.scanrelid = 0; - cscan->custom_scan_tlist = subplan->targetlist; + + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti); /* Pack partitioned table's Oid and conflict_action */ - cscan->custom_private = list_make3(makeInteger(parent_relid), +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + cscan->custom_private = list_make5(makeInteger(parent_relid), + makeInteger(conflict_action), + returning_list, + makeInteger(command_type), + makeInteger(parent_rti)); +#else + cscan->custom_private = list_make4(makeInteger(parent_relid), makeInteger(conflict_action), - returning_list); + returning_list, + makeInteger(command_type)); +#endif return &cscan->scan.plan; } @@ -529,21 +839,24 @@ partition_filter_create_scan_state(CustomScan *node) state = (PartitionFilterState *) palloc0(sizeof(PartitionFilterState)); NodeSetTag(state, T_CustomScanState); - state->css.flags = node->flags; - state->css.methods = &partition_filter_exec_methods; + /* Initialize base CustomScanState */ + state->css.flags = node->flags; + state->css.methods = &partition_filter_exec_methods; /* Extract necessary variables */ - state->subplan = (Plan *) linitial(node->custom_plans); - state->partitioned_table = intVal(linitial(node->custom_private)); - 
state->on_conflict_action = intVal(lsecond(node->custom_private)); - state->returning_list = lthird(node->custom_private); + state->subplan = (Plan *) linitial(node->custom_plans); + state->partitioned_table = (Oid) intVal(linitial(node->custom_private)); + state->on_conflict_action = intVal(lsecond(node->custom_private)); + state->returning_list = (List *) lthird(node->custom_private); + state->command_type = (CmdType) intVal(lfourth(node->custom_private)); +#if PG_VERSION_NUM >= 160000 /* for commit 178ee1d858 */ + state->parent_rti = (Index) intVal(lfirst(list_nth_cell(node->custom_private, 4))); +#endif /* Check boundaries */ Assert(state->on_conflict_action >= ONCONFLICT_NONE || state->on_conflict_action <= ONCONFLICT_UPDATE); - state->expr_state = NULL; - /* There should be exactly one subplan */ Assert(list_length(node->custom_plans) == 1); @@ -553,50 +866,72 @@ partition_filter_create_scan_state(CustomScan *node) void partition_filter_begin(CustomScanState *node, EState *estate, int eflags) { - PartitionFilterState *state = (PartitionFilterState *) node; - - MemoryContext old_mcxt; - const PartRelationInfo *prel; - Node *expr; - Index parent_varno = 1; - ListCell *lc; + PartitionFilterState *state = (PartitionFilterState *) node; + Oid parent_relid = state->partitioned_table; + ResultRelInfo *current_rri; /* It's convenient to store PlanState in 'custom_ps' */ node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); - if (state->expr_state == NULL) + /* Fetch current result relation (rri + rel) */ + current_rri = estate->es_result_relation_info; + + /* Init ResultRelInfo cache */ + init_result_parts_storage(&state->result_parts, + parent_relid, current_rri, + estate, state->command_type, + RPS_SKIP_RELATIONS, + state->on_conflict_action != ONCONFLICT_NONE, + RPS_RRI_CB(prepare_rri_for_insert, state), + RPS_RRI_CB(NULL, NULL)); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* ResultRelInfo of partitioned table. 
*/ { - /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); - Assert(prel != NULL); + RangeTblEntry *rte = rt_fetch(current_rri->ri_RangeTableIndex, estate->es_range_table); - /* Change varno in Vars according to range table */ - foreach(lc, estate->es_range_table) + if (rte->perminfoindex > 0) + state->result_parts.init_rri = current_rri; + else { - RangeTblEntry *entry = lfirst(lc); + /* + * Additional changes for 178ee1d858d: we cannot use current_rri + * because RTE for this ResultRelInfo has perminfoindex = 0. Need + * to use parent_rti (modify_table->nominalRelation) instead. + */ + Assert(state->parent_rti > 0); + state->result_parts.init_rri = estate->es_result_relations[state->parent_rti - 1]; + if (!state->result_parts.init_rri) + elog(ERROR, "cannot determine result info for partitioned table"); + } + } +#endif +} - if (entry->relid == state->partitioned_table) - break; +#if PG_VERSION_NUM >= 140000 +/* + * Re-initialization of PartitionFilterState for using new partition with new + * "current_rri" + */ +static void +reint_partition_filter_state(PartitionFilterState *state, ResultRelInfo *current_rri) +{ + Oid parent_relid = state->partitioned_table; + EState *estate = state->result_parts.estate; - parent_varno += 1; - } - expr = PrelExpressionForRelid(prel, parent_varno); + fini_result_parts_storage(&state->result_parts); - /* Prepare state for expression execution */ - old_mcxt = MemoryContextSwitchTo(estate->es_query_cxt); - state->expr_state = ExecInitExpr((Expr *) expr, NULL); - MemoryContextSwitchTo(old_mcxt); - } + state->returning_list = current_rri->ri_returningList; /* Init ResultRelInfo cache */ - init_result_parts_storage(&state->result_parts, estate, + init_result_parts_storage(&state->result_parts, + parent_relid, current_rri, + estate, state->command_type, + RPS_SKIP_RELATIONS, state->on_conflict_action != ONCONFLICT_NONE, - ResultPartsStorageStandard, - 
prepare_rri_for_insert, - (void *) state); - - state->warning_triggered = false; + RPS_RRI_CB(prepare_rri_for_insert, state), + RPS_RRI_CB(NULL, NULL)); } +#endif TupleTableSlot * partition_filter_exec(CustomScanState *node) @@ -610,77 +945,118 @@ partition_filter_exec(CustomScanState *node) slot = ExecProcNode(child_ps); - /* Save original ResultRelInfo */ - if (!state->result_parts.saved_rel_info) - state->result_parts.saved_rel_info = estate->es_result_relation_info; - if (!TupIsNull(slot)) { - MemoryContext old_mcxt; - const PartRelationInfo *prel; - ResultRelInfoHolder *rri_holder; - bool isnull; - Datum value; - TupleTableSlot *tmp_slot; - - /* Fetch PartRelationInfo for this partitioned relation */ - prel = get_pathman_relation_info(state->partitioned_table); - if (!prel) - { - if (!state->warning_triggered) - elog(WARNING, "table \"%s\" is not partitioned, " - "PartitionFilter will behave as a normal INSERT", - get_rel_name_or_relid(state->partitioned_table)); + MemoryContext old_mcxt; + ResultRelInfoHolder *rri_holder; + ResultRelInfo *rri; + JunkFilter *junkfilter = NULL; +#if PG_VERSION_NUM >= 140000 + PartitionRouterState *pr_state = linitial(node->custom_ps); - return slot; + /* + * For 14: in case UPDATE command, we can scanning several partitions + * in one plan. Need to switch context each time partition is switched. 
+ */ + if (IsPartitionRouterState(pr_state) && + state->result_parts.base_rri != pr_state->current_rri) + { /* + * Slot switched to new partition: need to + * reinitialize some PartitionFilterState variables + */ + reint_partition_filter_state(state, pr_state->current_rri); } +#else + junkfilter = estate->es_result_relation_info->ri_junkFilter; +#endif /* Switch to per-tuple context */ old_mcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - /* Execute expression */ - tmp_slot = econtext->ecxt_scantuple; - econtext->ecxt_scantuple = slot; - value = ExecEvalExprCompat(state->expr_state, econtext, &isnull, - mult_result_handler); - econtext->ecxt_scantuple = tmp_slot; - - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); - - /* - * Search for a matching partition. - * WARNING: 'prel' might change after this call! - */ - rri_holder = select_partition_for_insert(value, prel->ev_type, prel, - &state->result_parts, estate); + /* Search for a matching partition */ + rri_holder = select_partition_for_insert(estate, &state->result_parts, slot); /* Switch back and clean up per-tuple context */ MemoryContextSwitchTo(old_mcxt); ResetExprContext(econtext); + rri = rri_holder->result_rel_info; + /* Magic: replace parent's ResultRelInfo with ours */ - estate->es_result_relation_info = rri_holder->result_rel_info; + estate->es_result_relation_info = rri; + /* + * Besides 'transform map' we should process two cases: + * 1) CMD_UPDATE, row moved to other partition, junkfilter == NULL + * (filled in router_set_slot() for SELECT + INSERT); + * we should clear attribute 'ctid' (do not insert it into database); + * 2) CMD_INSERT/CMD_UPDATE operations for partitions with deleted column(s), + * junkfilter == NULL. 
+ */ /* If there's a transform map, rebuild the tuple */ - if (rri_holder->tuple_map) + if (rri_holder->tuple_map || + (!junkfilter && + (state->command_type == CMD_INSERT || state->command_type == CMD_UPDATE) && + (slot->tts_tupleDescriptor->natts > rri->ri_RelationDesc->rd_att->natts /* extra fields */ +#if PG_VERSION_NUM < 120000 + /* + * If we have a regular physical tuple 'slot->tts_tuple' and + * it's locally palloc'd => we will use this tuple in + * ExecMaterializeSlot() instead of materialize the slot, so + * need to check number of attributes for this tuple: + */ + || (slot->tts_tuple && slot->tts_shouldFree && + HeapTupleHeaderGetNatts(slot->tts_tuple->t_data) > + rri->ri_RelationDesc->rd_att->natts /* extra fields */) +#endif + ))) { +#if PG_VERSION_NUM < 120000 HeapTuple htup_old, htup_new; - Relation child_rel = rri_holder->result_rel_info->ri_RelationDesc; +#endif + Relation child_rel = rri->ri_RelationDesc; + TupleConversionMap *tuple_map; + if (rri_holder->tuple_map) + tuple_map = rri_holder->tuple_map; + else + { + if (!rri_holder->tuple_map_child) + { /* + * Generate child->child tuple transformation map. We need to + * convert tuples because child TupleDesc has extra + * columns ('ctid' etc.) and need remove them. + */ + rri_holder->tuple_map_child = build_part_tuple_map_child(child_rel); + } + tuple_map = rri_holder->tuple_map_child; + } + + /* xxx why old code decided to materialize it? */ +#if PG_VERSION_NUM < 120000 htup_old = ExecMaterializeSlot(slot); - htup_new = do_convert_tuple(htup_old, rri_holder->tuple_map); + htup_new = do_convert_tuple(htup_old, tuple_map); + ExecClearTuple(slot); +#endif - /* Allocate new slot if needed */ + /* + * Allocate new slot if needed. + * For 12, it is sort of important to create BufferHeapTuple, + * though we will store virtual one there. Otherwise, ModifyTable + * decides to copy it to mt_scans slot which has tupledesc of + * parent. 
+ */ if (!state->tup_convert_slot) - state->tup_convert_slot = MakeTupleTableSlot(); + state->tup_convert_slot = MakeTupleTableSlotCompat(&TTSOpsBufferHeapTuple); + /* TODO: why should we *always* set a new slot descriptor? */ ExecSetSlotDescriptor(state->tup_convert_slot, RelationGetDescr(child_rel)); - ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); - - /* Now replace the original slot */ - slot = state->tup_convert_slot; +#if PG_VERSION_NUM >= 120000 + slot = execute_attr_map_slot(tuple_map->attrMap, slot, state->tup_convert_slot); +#else + slot = ExecStoreTuple(htup_new, state->tup_convert_slot, InvalidBuffer, true); +#endif } return slot; @@ -695,7 +1071,7 @@ partition_filter_end(CustomScanState *node) PartitionFilterState *state = (PartitionFilterState *) node; /* Executor will close rels via estate->es_result_relations */ - fini_result_parts_storage(&state->result_parts, false); + fini_result_parts_storage(&state->result_parts); Assert(list_length(node->custom_ps) == 1); ExecEndNode((PlanState *) linitial(node->custom_ps)); @@ -708,8 +1084,7 @@ partition_filter_end(CustomScanState *node) void partition_filter_rescan(CustomScanState *node) { - Assert(list_length(node->custom_ps) == 1); - ExecReScan((PlanState *) linitial(node->custom_ps)); + elog(ERROR, "partition_filter_rescan is not implemented"); } void @@ -723,61 +1098,46 @@ partition_filter_explain(CustomScanState *node, List *ancestors, ExplainState *e /* * Build partition filter's target list pointing to subplan tuple's elements. 
*/ -static List * -pfilter_build_tlist(Relation parent_rel, List *tlist) +List * +pfilter_build_tlist(Plan *subplan, Index varno) { List *result_tlist = NIL; ListCell *lc; - int i = 1; - foreach (lc, tlist) + foreach (lc, subplan->targetlist) { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - Expr *col_expr; - Form_pg_attribute attr; - - /* Make sure that this attribute exists */ - if (i > RelationGetDescr(parent_rel)->natts) - elog(ERROR, "error in function " CppAsString(pfilter_build_tlist)); + TargetEntry *tle = (TargetEntry *) lfirst(lc), + *newtle = NULL; - /* Fetch pg_attribute entry for this column */ - attr = RelationGetDescr(parent_rel)->attrs[i - 1]; - - /* If this column is dropped, create a placeholder Const */ - if (attr->attisdropped) + if (IsA(tle->expr, Const)) { - /* Insert NULL for dropped column */ - col_expr = (Expr *) makeConst(INT4OID, - -1, - InvalidOid, - sizeof(int32), - (Datum) 0, - true, - true); + /* TODO: maybe we should use copyObject(tle->expr)? */ + newtle = makeTargetEntry(tle->expr, + tle->resno, + tle->resname, + tle->resjunk); } - /* Otherwise we should create a Var referencing subplan's output */ else { - col_expr = (Expr *) makeVar(INDEX_VAR, /* point to subplan's elements */ - i, /* direct attribute mapping */ - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); + Var *var = makeVar(varno, /* point to subplan's elements */ + tle->resno, + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + + newtle = makeTargetEntry((Expr *) var, + tle->resno, + tle->resname, + tle->resjunk); } - result_tlist = lappend(result_tlist, - makeTargetEntry(col_expr, - i, - NULL, - tle->resjunk)); - i++; /* next resno */ + result_tlist = lappend(result_tlist, newtle); } return result_tlist; } - /* * ---------------------------------------------- * Additional init steps for ResultPartsStorage @@ -786,21 +1146,17 @@ 
pfilter_build_tlist(Relation parent_rel, List *tlist) /* Main trigger */ static void -prepare_rri_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg) +prepare_rri_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) { - prepare_rri_returning_for_insert(estate, rri_holder, rps_storage, arg); - prepare_rri_fdw_for_insert(estate, rri_holder, rps_storage, arg); + prepare_rri_returning_for_insert(rri_holder, rps_storage); + prepare_rri_fdw_for_insert(rri_holder, rps_storage); } /* Prepare 'RETURNING *' tlist & projection */ static void -prepare_rri_returning_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg) +prepare_rri_returning_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) { PartitionFilterState *pfstate; List *returning_list; @@ -813,7 +1169,7 @@ prepare_rri_returning_for_insert(EState *estate, if (!rri_holder->tuple_map) return; - pfstate = (PartitionFilterState *) arg; + pfstate = (PartitionFilterState *) rps_storage->init_rri_holder_cb_arg; returning_list = pfstate->returning_list; /* Exit if there's no RETURNING list */ @@ -821,13 +1177,9 @@ prepare_rri_returning_for_insert(EState *estate, return; child_rri = rri_holder->result_rel_info; - parent_rri = rps_storage->saved_rel_info; + parent_rri = rps_storage->base_rri; parent_rt_idx = parent_rri->ri_RangeTableIndex; - /* Create ExprContext for tuple projections */ - if (!pfstate->tup_convert_econtext) - pfstate->tup_convert_econtext = CreateExprContext(estate); - /* Replace parent's varattnos with child's */ returning_list = (List *) fix_returning_list_mutator((Node *) returning_list, @@ -843,21 +1195,22 @@ prepare_rri_returning_for_insert(EState *estate, /* Build new projection info */ child_rri->ri_projectReturning = - ExecBuildProjectionInfoCompat(returning_list, pfstate->tup_convert_econtext, + 
ExecBuildProjectionInfoCompat(returning_list, pfstate->css.ss.ps.ps_ExprContext, result_slot, NULL /* HACK: no PlanState */, RelationGetDescr(child_rri->ri_RelationDesc)); } /* Prepare FDW access structs */ static void -prepare_rri_fdw_for_insert(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg) +prepare_rri_fdw_for_insert(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) { ResultRelInfo *rri = rri_holder->result_rel_info; FdwRoutine *fdw_routine = rri->ri_FdwRoutine; Oid partid; + EState *estate; + + estate = rps_storage->estate; /* Nothing to do if not FDW */ if (fdw_routine == NULL) @@ -890,6 +1243,7 @@ prepare_rri_fdw_for_insert(EState *estate, elog(ERROR, "FDWs other than postgres_fdw are restricted"); + break; case PF_FDW_INSERT_ANY_FDW: elog(WARNING, "unrestricted FDW mode may lead to crashes"); @@ -905,13 +1259,18 @@ prepare_rri_fdw_for_insert(EState *estate, if (fdw_routine->PlanForeignModify) { RangeTblEntry *rte; - ModifyTableState mtstate; - List *fdw_private; Query query; + PlanState pstate, + *pstate_ptr; + ModifyTableState mtstate; PlannedStmt *plan; + + /* This is the value we'd like to get */ + List *fdw_private; + TupleDesc tupdesc; - int i, - target_attr; + int target_attr, + i; /* Fetch RangeTblEntry for partition */ rte = rt_fetch(rri->ri_RangeTableIndex, estate->es_range_table); @@ -932,6 +1291,14 @@ prepare_rri_fdw_for_insert(EState *estate, query.targetList = NIL; query.returningList = NIL; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* + * Copy the RTEPermissionInfos into query as well, so that + * add_rte_to_flat_rtable() will work correctly. 
+ */ + query.rteperminfos = estate->es_rteperminfos; +#endif + /* Generate 'query.targetList' using 'tupdesc' */ target_attr = 1; for (i = 0; i < tupdesc->natts; i++) @@ -940,7 +1307,7 @@ prepare_rri_fdw_for_insert(EState *estate, TargetEntry *te; Param *param; - attr = tupdesc->attrs[i]; + attr = TupleDescAttr(tupdesc, i); if (attr->attisdropped) continue; @@ -962,24 +1329,46 @@ prepare_rri_fdw_for_insert(EState *estate, target_attr++; } - /* Create fake ModifyTableState */ - memset((void *) &mtstate, 0, sizeof(ModifyTableState)); + /* HACK: plan a fake query for FDW access to be planned as well */ + elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); +#if PG_VERSION_NUM >= 130000 + plan = standard_planner(&query, NULL, 0, NULL); +#else + plan = standard_planner(&query, 0, NULL); +#endif + + /* HACK: create a fake PlanState */ + memset(&pstate, 0, sizeof(PlanState)); + pstate.plan = plan->planTree; + pstate_ptr = &pstate; + + /* HACK: create a fake ModifyTableState */ + memset(&mtstate, 0, sizeof(ModifyTableState)); NodeSetTag(&mtstate, T_ModifyTableState); mtstate.ps.state = estate; mtstate.operation = CMD_INSERT; +#if PG_VERSION_NUM >= 140000 + /* + * Some fields ("mt_plans", "mt_nplans", "mt_whichplan") removed + * in 86dc90056dfd + */ + outerPlanState(&mtstate.ps) = pstate_ptr; + mtstate.mt_nrels = 1; +#else + mtstate.mt_plans = &pstate_ptr; + mtstate.mt_nplans = 1; + mtstate.mt_whichplan = 0; +#endif mtstate.resultRelInfo = rri; +#if PG_VERSION_NUM < 110000 mtstate.mt_onconflict = ONCONFLICT_NONE; - - /* Plan fake query in for FDW access to be planned as well */ - elog(DEBUG1, "FDW(%u): plan fake query for fdw_private", partid); - plan = standard_planner(&query, 0, NULL); +#endif /* Extract fdw_private from useless plan */ elog(DEBUG1, "FDW(%u): extract fdw_private", partid); - fdw_private = (List *) - linitial(((ModifyTable *) plan->planTree)->fdwPrivLists); + fdw_private = linitial(((ModifyTable *) plan->planTree)->fdwPrivLists); - /* call 
BeginForeignModify on 'rri' */ + /* HACK: call BeginForeignModify on 'rri' */ elog(DEBUG1, "FDW(%u): call BeginForeignModify on a fake INSERT node", partid); fdw_routine->BeginForeignModify(&mtstate, rri, fdw_private, 0, 0); @@ -1029,7 +1418,11 @@ fix_returning_list_mutator(Node *node, void *state) for (i = 0; i < rri_holder->tuple_map->outdesc->natts; i++) { /* Good, 'varattno' of parent is child's 'i+1' */ +#if PG_VERSION_NUM >= 130000 + if (var->varattno == rri_holder->tuple_map->attrMap->attnums[i]) +#else if (var->varattno == rri_holder->tuple_map->attrMap[i]) +#endif { var->varattno = i + 1; /* attnos begin with 1 */ found_mapping = true; @@ -1046,7 +1439,7 @@ fix_returning_list_mutator(Node *node, void *state) return (Node *) var; } - return expression_tree_mutator(node, fix_returning_list_mutator, state); + return expression_tree_mutator_compat(node, fix_returning_list_mutator, state); } @@ -1058,7 +1451,7 @@ fix_returning_list_mutator(Node *node, void *state) /* Append RangeTblEntry 'rte' to estate->es_range_table */ static Index -append_rte_to_estate(EState *estate, RangeTblEntry *rte) +append_rte_to_estate(EState *estate, RangeTblEntry *rte, Relation child_rel) { estate_mod_data *emd_struct = fetch_estate_mod_data(estate); @@ -1071,6 +1464,34 @@ append_rte_to_estate(EState *estate, RangeTblEntry *rte) /* Update estate_mod_data */ emd_struct->estate_not_modified = false; +#if PG_VERSION_NUM >= 120000 + estate->es_range_table_size = list_length(estate->es_range_table); +#endif +#if PG_VERSION_NUM >= 120000 && PG_VERSION_NUM < 130000 + /* + * On PG = 12, also add rte to es_range_table_array. This is horribly + * inefficient, yes. + * In 12 es_range_table_array ptr is not saved anywhere in + * core, so it is safe to repalloc. 
+ * + * In >= 13 (3c92658) es_range_table_array was removed + */ + estate->es_range_table_array = (RangeTblEntry **) + repalloc(estate->es_range_table_array, + estate->es_range_table_size * sizeof(RangeTblEntry *)); + estate->es_range_table_array[estate->es_range_table_size - 1] = rte; +#endif + +#if PG_VERSION_NUM >= 120000 + /* + * Also reallocate es_relations, because es_range_table_size defines its + * len. This also ensures ExecEndPlan will close the rel. + */ + estate->es_relations = (Relation *) + repalloc(estate->es_relations, estate->es_range_table_size * sizeof(Relation)); + estate->es_relations[estate->es_range_table_size - 1] = child_rel; +#endif + return list_length(estate->es_range_table); } @@ -1078,14 +1499,46 @@ append_rte_to_estate(EState *estate, RangeTblEntry *rte) static int append_rri_to_estate(EState *estate, ResultRelInfo *rri) { - estate_mod_data *emd_struct = fetch_estate_mod_data(estate); - int result_rels_allocated = emd_struct->estate_alloc_result_rels; + estate_mod_data *emd_struct = fetch_estate_mod_data(estate); + int result_rels_allocated = emd_struct->estate_alloc_result_rels; +#if PG_VERSION_NUM >= 140000 /* reworked in commit a04daa97a433 */ + ResultRelInfo **rri_array = estate->es_result_relations; + + /* + * We already increased variable "estate->es_range_table_size" in previous + * call append_rte_to_estate(): see + * "estate->es_range_table_size = list_length(estate->es_range_table)" + * after "lappend(estate->es_range_table, rte)". So we should append + * new value in "estate->es_result_relations" only. 
+ */ + + /* Reallocate estate->es_result_relations if needed */ + if (result_rels_allocated < estate->es_range_table_size) + { + result_rels_allocated = result_rels_allocated * ALLOC_EXP + 1; + estate->es_result_relations = palloc(result_rels_allocated * + sizeof(ResultRelInfo *)); + memcpy(estate->es_result_relations, + rri_array, + (estate->es_range_table_size - 1) * sizeof(ResultRelInfo *)); + } + + estate->es_result_relations[estate->es_range_table_size - 1] = rri; + estate->es_opened_result_relations = lappend(estate->es_opened_result_relations, rri); + + /* Update estate_mod_data */ + emd_struct->estate_alloc_result_rels = result_rels_allocated; + emd_struct->estate_not_modified = false; + + return estate->es_range_table_size; +#else /* Reallocate estate->es_result_relations if needed */ if (result_rels_allocated <= estate->es_num_result_relations) { ResultRelInfo *rri_array = estate->es_result_relations; + /* HACK: we can't repalloc or free previous array (there might be users) */ result_rels_allocated = result_rels_allocated * ALLOC_EXP + 1; estate->es_result_relations = palloc(result_rels_allocated * sizeof(ResultRelInfo)); @@ -1106,6 +1559,7 @@ append_rri_to_estate(EState *estate, ResultRelInfo *rri) emd_struct->estate_not_modified = false; return estate->es_num_result_relations++; +#endif } @@ -1134,13 +1588,21 @@ fetch_estate_mod_data(EState *estate) if (cb->func == pf_memcxt_callback) return (estate_mod_data *) cb->arg; - cb = estate_mcxt->reset_cbs->next; + cb = cb->next; } /* Have to create a new one */ emd_struct = MemoryContextAlloc(estate_mcxt, sizeof(estate_mod_data)); emd_struct->estate_not_modified = true; +#if PG_VERSION_NUM >= 140000 + /* + * Reworked in commit a04daa97a433: field "es_num_result_relations" + * removed + */ + emd_struct->estate_alloc_result_rels = estate->es_range_table_size; +#else emd_struct->estate_alloc_result_rels = estate->es_num_result_relations; +#endif cb = MemoryContextAlloc(estate_mcxt, 
sizeof(MemoryContextCallback)); cb->func = pf_memcxt_callback; diff --git a/src/partition_overseer.c b/src/partition_overseer.c new file mode 100644 index 00000000..d858374a --- /dev/null +++ b/src/partition_overseer.c @@ -0,0 +1,189 @@ +#include "postgres.h" + +#include "partition_filter.h" +#include "partition_overseer.h" +#include "partition_router.h" +#include "planner_tree_modification.h" + +CustomScanMethods partition_overseer_plan_methods; +CustomExecMethods partition_overseer_exec_methods; + +void +init_partition_overseer_static_data(void) +{ + partition_overseer_plan_methods.CustomName = OVERSEER_NODE_NAME; + partition_overseer_plan_methods.CreateCustomScanState = partition_overseer_create_scan_state; + + partition_overseer_exec_methods.CustomName = OVERSEER_NODE_NAME; + partition_overseer_exec_methods.BeginCustomScan = partition_overseer_begin; + partition_overseer_exec_methods.ExecCustomScan = partition_overseer_exec; + partition_overseer_exec_methods.EndCustomScan = partition_overseer_end; + partition_overseer_exec_methods.ReScanCustomScan = partition_overseer_rescan; + partition_overseer_exec_methods.MarkPosCustomScan = NULL; + partition_overseer_exec_methods.RestrPosCustomScan = NULL; + partition_overseer_exec_methods.ExplainCustomScan = partition_overseer_explain; + + RegisterCustomScanMethods(&partition_overseer_plan_methods); +} + +Plan * +make_partition_overseer(Plan *subplan) +{ + CustomScan *cscan = makeNode(CustomScan); + + /* Copy costs etc */ + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; + + /* Setup methods, child plan and param number for EPQ */ + cscan->methods = &partition_overseer_plan_methods; + cscan->custom_plans = list_make1(subplan); + cscan->custom_private = NIL; + + /* No physical relation will be scanned */ + cscan->scan.scanrelid = 0; + + /* Build an appropriate 
target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, INDEX_VAR); + cscan->custom_scan_tlist = subplan->targetlist; + + return &cscan->scan.plan; +} + + +Node * +partition_overseer_create_scan_state(CustomScan *node) +{ + CustomScanState *state = palloc0(sizeof(CustomScanState)); + NodeSetTag(state, T_CustomScanState); + + state->flags = node->flags; + state->methods = &partition_overseer_exec_methods; + + return (Node *) state; +} + +static void +set_mt_state_for_router(PlanState *state, void *context) +{ +#if PG_VERSION_NUM < 140000 + int i; +#endif + ModifyTableState *mt_state = (ModifyTableState *) state; + + if (!IsA(state, ModifyTableState)) + return; + +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_plans", "mt_nplans" removed in 86dc90056dfd */ + { + CustomScanState *pf_state = (CustomScanState *) outerPlanState(mt_state); +#else + for (i = 0; i < mt_state->mt_nplans; i++) + { + CustomScanState *pf_state = (CustomScanState *) mt_state->mt_plans[i]; +#endif + /* Check if this is a PartitionFilter + PartitionRouter combo */ + if (IsPartitionFilterState(pf_state)) + { + PartitionRouterState *pr_state = linitial(pf_state->custom_ps); + if (IsPartitionRouterState(pr_state)) + { + /* HACK: point to ModifyTable in PartitionRouter */ + pr_state->mt_state = mt_state; + } + } + } +} + +void +partition_overseer_begin(CustomScanState *node, + EState *estate, + int eflags) +{ + CustomScan *css = (CustomScan *) node->ss.ps.plan; + Plan *plan = linitial(css->custom_plans); + + /* It's convenient to store PlanState in 'custom_ps' */ + node->custom_ps = list_make1(ExecInitNode(plan, estate, eflags)); + + /* Save ModifyTableState in PartitionRouterState structs */ + state_tree_visitor((PlanState *) linitial(node->custom_ps), + set_mt_state_for_router, + NULL); +} + +TupleTableSlot * +partition_overseer_exec(CustomScanState *node) +{ + ModifyTableState *mt_state = linitial(node->custom_ps); + + TupleTableSlot *slot; + int mt_plans_old, + mt_plans_new; + + 
/* Get initial signal */ +#if PG_VERSION_NUM >= 140000 /* field "mt_nplans" removed in 86dc90056dfd */ + mt_plans_old = mt_state->mt_nrels; +#else + mt_plans_old = mt_state->mt_nplans; +#endif + +restart: + /* Run ModifyTable */ + slot = ExecProcNode((PlanState *) mt_state); + + /* Get current signal */ +#if PG_VERSION_NUM >= 140000 /* field "mt_nplans" removed in 86dc90056dfd */ + mt_plans_new = MTHackField(mt_state, mt_nrels); +#else + mt_plans_new = MTHackField(mt_state, mt_nplans); +#endif + + /* Did PartitionRouter ask us to restart? */ + if (mt_plans_new != mt_plans_old) + { + /* Signal points to current plan */ +#if PG_VERSION_NUM < 140000 + int state_idx = -mt_plans_new; +#endif + + /* HACK: partially restore ModifyTable's state */ + MTHackField(mt_state, mt_done) = false; +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_nplans", "mt_whichplan" removed in 86dc90056dfd */ + MTHackField(mt_state, mt_nrels) = mt_plans_old; +#else + MTHackField(mt_state, mt_nplans) = mt_plans_old; + MTHackField(mt_state, mt_whichplan) = state_idx; +#endif + + /* Rerun ModifyTable */ + goto restart; + } + + return slot; +} + +void +partition_overseer_end(CustomScanState *node) +{ + Assert(list_length(node->custom_ps) == 1); + ExecEndNode((PlanState *) linitial(node->custom_ps)); +} + +void +partition_overseer_rescan(CustomScanState *node) +{ + elog(ERROR, "partition_overseer_rescan is not implemented"); +} + +void +partition_overseer_explain(CustomScanState *node, + List *ancestors, + ExplainState *es) +{ + /* Nothing to do here now */ +} diff --git a/src/partition_router.c b/src/partition_router.c new file mode 100644 index 00000000..5f00e9b1 --- /dev/null +++ b/src/partition_router.c @@ -0,0 +1,746 @@ +/* ------------------------------------------------------------------------ + * + * partition_router.c + * Route row to a right partition in UPDATE operation + * + * Copyright (c) 2017, Postgres Professional + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group 
+ * Portions Copyright (c) 1994, Regents of the University of California + * + * ------------------------------------------------------------------------ + */ + +#include "partition_filter.h" +#include "partition_router.h" +#include "compat/pg_compat.h" + +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#include "access/tableam.h" +#endif +#include "access/xact.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" /* direct heap_delete, no-no */ +#endif +#include "access/htup_details.h" +#include "catalog/pg_class.h" +#include "commands/trigger.h" +#include "executor/nodeModifyTable.h" +#include "foreign/fdwapi.h" +#if PG_VERSION_NUM >= 120000 +#include "nodes/makefuncs.h" /* make_ands_explicit */ +#include "optimizer/optimizer.h" +#endif +#include "optimizer/clauses.h" +#include "storage/bufmgr.h" +#include "utils/guc.h" +#include "utils/rel.h" + + +#define MTDisableStmtTriggers(mt_state, pr_state) \ + do { \ + TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ + \ + if (triggers) \ + { \ + (pr_state)->insert_stmt_triggers |= triggers->trig_insert_after_statement; \ + (pr_state)->update_stmt_triggers |= triggers->trig_update_after_statement; \ + triggers->trig_insert_after_statement = false; \ + triggers->trig_update_after_statement = false; \ + } \ + } while (0) + +#define MTEnableStmtTriggers(mt_state, pr_state) \ + do { \ + TriggerDesc *triggers = (mt_state)->resultRelInfo->ri_TrigDesc; \ + \ + if (triggers) \ + { \ + triggers->trig_insert_after_statement = (pr_state)->insert_stmt_triggers; \ + triggers->trig_update_after_statement = (pr_state)->update_stmt_triggers; \ + } \ + } while (0) + + + +bool pg_pathman_enable_partition_router = false; + +CustomScanMethods partition_router_plan_methods; +CustomExecMethods partition_router_exec_methods; + +static TupleTableSlot *router_set_slot(PartitionRouterState *state, + TupleTableSlot *slot, + CmdType operation); +static TupleTableSlot *router_get_slot(PartitionRouterState *state, + 
EState *estate, + bool *should_process); + +static void router_lazy_init_constraint(PartitionRouterState *state, bool recreate); + +static ItemPointerData router_extract_ctid(PartitionRouterState *state, + TupleTableSlot *slot); + +static TupleTableSlot *router_lock_or_delete_tuple(PartitionRouterState *state, + TupleTableSlot *slot, + ItemPointer tupleid, + bool *deleted); + +void +init_partition_router_static_data(void) +{ + partition_router_plan_methods.CustomName = UPDATE_NODE_NAME; + partition_router_plan_methods.CreateCustomScanState = partition_router_create_scan_state; + + partition_router_exec_methods.CustomName = UPDATE_NODE_NAME; + partition_router_exec_methods.BeginCustomScan = partition_router_begin; + partition_router_exec_methods.ExecCustomScan = partition_router_exec; + partition_router_exec_methods.EndCustomScan = partition_router_end; + partition_router_exec_methods.ReScanCustomScan = partition_router_rescan; + partition_router_exec_methods.MarkPosCustomScan = NULL; + partition_router_exec_methods.RestrPosCustomScan = NULL; + partition_router_exec_methods.ExplainCustomScan = partition_router_explain; + + DefineCustomBoolVariable("pg_pathman.enable_partitionrouter", + "Enables the planner's use of " UPDATE_NODE_NAME " custom node.", + NULL, + &pg_pathman_enable_partition_router, + false, + PGC_USERSET, + 0, + NULL, + NULL, + NULL); + + RegisterCustomScanMethods(&partition_router_plan_methods); +} + +Plan * +make_partition_router(Plan *subplan, int epq_param, Index parent_rti) +{ + CustomScan *cscan = makeNode(CustomScan); + + /* Copy costs etc */ + cscan->scan.plan.startup_cost = subplan->startup_cost; + cscan->scan.plan.total_cost = subplan->total_cost; + cscan->scan.plan.plan_rows = subplan->plan_rows; + cscan->scan.plan.plan_width = subplan->plan_width; + + /* Setup methods, child plan and param number for EPQ */ + cscan->methods = &partition_router_plan_methods; + cscan->custom_plans = list_make1(subplan); + cscan->custom_private = 
list_make1(makeInteger(epq_param)); + + /* No physical relation will be scanned */ + cscan->scan.scanrelid = 0; + + /* Build an appropriate target list */ + cscan->scan.plan.targetlist = pfilter_build_tlist(subplan, parent_rti); + + return &cscan->scan.plan; +} + +Node * +partition_router_create_scan_state(CustomScan *node) +{ + PartitionRouterState *state; + + state = (PartitionRouterState *) palloc0(sizeof(PartitionRouterState)); + NodeSetTag(state, T_CustomScanState); + + state->css.flags = node->flags; + state->css.methods = &partition_router_exec_methods; + + /* Extract necessary variables */ + state->epqparam = intVal(linitial(node->custom_private)); + state->subplan = (Plan *) linitial(node->custom_plans); + + return (Node *) state; +} + +void +partition_router_begin(CustomScanState *node, EState *estate, int eflags) +{ + PartitionRouterState *state = (PartitionRouterState *) node; + + /* Remember current relation we're going to delete from */ + state->current_rri = estate->es_result_relation_info; + + EvalPlanQualInit_compat(&state->epqstate, estate, + state->subplan, NIL, + state->epqparam); + + /* It's convenient to store PlanState in 'custom_ps' */ + node->custom_ps = list_make1(ExecInitNode(state->subplan, estate, eflags)); +} + +TupleTableSlot * +partition_router_exec(CustomScanState *node) +{ + EState *estate = node->ss.ps.state; + PartitionRouterState *state = (PartitionRouterState *) node; + TupleTableSlot *slot; + bool should_process; + +take_next_tuple: + /* Get next tuple for processing */ + slot = router_get_slot(state, estate, &should_process); + + if (should_process) + { + CmdType new_cmd; + bool deleted; + ItemPointerData ctid; + /* Variables for prepare a full "new" tuple, after 86dc90056dfd */ +#if PG_VERSION_NUM >= 140000 + TupleTableSlot *old_slot; + ResultRelInfo *rri; +#endif + TupleTableSlot *full_slot; + bool partition_changed = false; + + ItemPointerSetInvalid(&ctid); + +#if PG_VERSION_NUM < 140000 + full_slot = slot; + + /* Build 
new junkfilter if needed */ + if (state->junkfilter == NULL) + state->junkfilter = state->current_rri->ri_junkFilter; +#else + if (slot->tts_tableOid == InvalidOid) + elog(ERROR, "invalid table OID in returned tuple"); + + /* + * For 14: in case UPDATE command we can scanning several partitions + * in one plan. Need to switch context each time partition is switched. + */ + if (RelationGetRelid(state->current_rri->ri_RelationDesc) != slot->tts_tableOid) + { + /* + * Function router_get_slot() switched to new partition: need to + * reinitialize some PartitionRouterState variables + */ + state->current_rri = ExecLookupResultRelByOid(state->mt_state, + slot->tts_tableOid, false, false); + partition_changed = true; + } +#endif + + /* Build recheck constraint state lazily (and re-create constraint + * in case we start scan another relation) */ + router_lazy_init_constraint(state, partition_changed); + + /* Extract item pointer from current tuple */ + ctid = router_extract_ctid(state, slot); + Assert(ItemPointerIsValid(&ctid)); + + /* Magic: replace parent's ResultRelInfo with ours */ + estate->es_result_relation_info = state->current_rri; + +#if PG_VERSION_NUM >= 140000 /* after 86dc90056dfd */ + /* Store original slot */ + estate->es_original_tuple = slot; + /* + * "slot" contains new values of the changed columns plus row + * identity information such as CTID. + * Need to prepare a "newSlot" with full tuple for triggers in + * router_lock_or_delete_tuple(). But we should return old slot + * with CTID because this CTID is used in ExecModifyTable(). + */ + rri = state->current_rri; + + /* Initialize projection info if first time for this table. 
*/ + if (unlikely(!rri->ri_projectNewInfoValid)) +#ifdef PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION + PgproExecInitUpdateProjection(state->mt_state, rri); +#else + ExecInitUpdateProjection(state->mt_state, rri); +#endif /* !PG_HAVE_PGPRO_EXEC_INIT_UPDATE_PROJECTION */ + + old_slot = rri->ri_oldTupleSlot; + /* Fetch the most recent version of old tuple. */ + if (!table_tuple_fetch_row_version(rri->ri_RelationDesc, + &ctid, SnapshotAny, old_slot)) + elog(ERROR, "failed to fetch partition tuple being updated"); + + /* Build full tuple (using "old_slot" + changed from "slot"): */ + full_slot = ExecGetUpdateNewTuple(rri, slot, old_slot); +#endif /* PG_VERSION_NUM >= 140000 */ + + /* Lock or delete tuple from old partition */ + full_slot = router_lock_or_delete_tuple(state, full_slot, + &ctid, &deleted); + + /* We require a tuple (previous one has vanished) */ + if (TupIsNull(full_slot)) + goto take_next_tuple; + + /* Should we use UPDATE or DELETE + INSERT? */ + new_cmd = deleted ? CMD_INSERT : CMD_UPDATE; + + /* Alter ModifyTable's state and return */ + return router_set_slot(state, full_slot, new_cmd); + } + + return slot; +} + +void +partition_router_end(CustomScanState *node) +{ + PartitionRouterState *state = (PartitionRouterState *) node; + + Assert(list_length(node->custom_ps) == 1); + ExecEndNode((PlanState *) linitial(node->custom_ps)); + + EvalPlanQualEnd(&state->epqstate); +} + +void +partition_router_rescan(CustomScanState *node) +{ + elog(ERROR, "partition_router_rescan is not implemented"); +} + +void +partition_router_explain(CustomScanState *node, + List *ancestors, + ExplainState *es) +{ + /* Nothing to do here now */ +} + +/* Return tuple OR yield it and change ModifyTable's operation */ +static TupleTableSlot * +router_set_slot(PartitionRouterState *state, + TupleTableSlot *slot, + CmdType operation) +{ + ModifyTableState *mt_state = state->mt_state; + + /* Fast path for correct operation type */ + if (mt_state->operation == operation) + return slot; + 
+ /* HACK: alter ModifyTable's state */ +#if PG_VERSION_NUM >= 140000 + /* Fields "mt_nplans", "mt_whichplan" removed in 86dc90056dfd */ + MTHackField(mt_state, mt_nrels) = -mt_state->mt_nrels; +#else + MTHackField(mt_state, mt_nplans) = -mt_state->mt_whichplan; +#endif + MTHackField(mt_state, operation) = operation; + + /* HACK: disable AFTER STATEMENT triggers */ + MTDisableStmtTriggers(mt_state, state); + + if (!TupIsNull(slot)) + { + EState *estate = mt_state->ps.state; + +#if PG_VERSION_NUM < 140000 /* field "ri_junkFilter" removed in 86dc90056dfd */ + /* We should've cached junk filter already */ + Assert(state->junkfilter); + + /* HACK: conditionally disable junk filter in result relation */ + state->current_rri->ri_junkFilter = (operation == CMD_UPDATE) ? + state->junkfilter : + NULL; +#endif + + /* Don't forget to set saved_slot! */ + state->yielded_slot = ExecInitExtraTupleSlotCompat(estate, + slot->tts_tupleDescriptor, + &TTSOpsHeapTuple); + ExecCopySlot(state->yielded_slot, slot); +#if PG_VERSION_NUM >= 140000 + Assert(estate->es_original_tuple != NULL); + state->yielded_original_slot = ExecInitExtraTupleSlotCompat(estate, + estate->es_original_tuple->tts_tupleDescriptor, + &TTSOpsHeapTuple); + ExecCopySlot(state->yielded_original_slot, estate->es_original_tuple); +#endif + } + + /* Yield */ + state->yielded = true; + return NULL; +} + +/* Fetch next tuple (either fresh or yielded) */ +static TupleTableSlot * +router_get_slot(PartitionRouterState *state, + EState *estate, + bool *should_process) +{ + TupleTableSlot *slot; + + /* Do we have a preserved slot? 
*/ + if (state->yielded) + { + /* HACK: enable AFTER STATEMENT triggers */ + MTEnableStmtTriggers(state->mt_state, state); + + /* Reset saved slot */ + slot = state->yielded_slot; + state->yielded_slot = NULL; +#if PG_VERSION_NUM >= 140000 + estate->es_original_tuple = state->yielded_original_slot; + state->yielded_original_slot = NULL; +#endif + state->yielded = false; + + /* We shouldn't process preserved slot... */ + *should_process = false; + } + else + { + /* Fetch next tuple */ + slot = ExecProcNode((PlanState *) linitial(state->css.custom_ps)); + + /* Restore operation type for AFTER STATEMENT triggers */ + if (TupIsNull(slot)) + slot = router_set_slot(state, NULL, CMD_UPDATE); + + /* But we have to process non-empty slot */ + *should_process = !TupIsNull(slot); + } + + return slot; +} + +static void +router_lazy_init_constraint(PartitionRouterState *state, bool reinit) +{ + if (state->constraint == NULL || reinit) + { + Relation rel = state->current_rri->ri_RelationDesc; + Oid relid = RelationGetRelid(rel); + List *clauses = NIL; + Expr *expr; + + while (OidIsValid(relid)) + { + /* It's probably OK if expression is NULL */ + expr = get_partition_constraint_expr(relid, false); + expr = expression_planner(expr); + + if (!expr) + break; + + /* Add this constraint to set */ + clauses = lappend(clauses, expr); + + /* Consider parent's check constraint as well */ + relid = get_parent_of_partition(relid); + } + + if (!clauses) + elog(ERROR, "no recheck constraint for relid %d", relid); + + state->constraint = ExecInitExpr(make_ands_explicit(clauses), NULL); + } +} + +/* Extract ItemPointer from tuple using JunkFilter */ +static ItemPointerData +router_extract_ctid(PartitionRouterState *state, TupleTableSlot *slot) +{ + Relation rel = state->current_rri->ri_RelationDesc; + char relkind = RelationGetForm(rel)->relkind; + + if (relkind == RELKIND_RELATION) + { + Datum ctid_datum; + bool ctid_isnull; + + ctid_datum = ExecGetJunkAttribute(slot, +#if PG_VERSION_NUM >= 
140000 /* field "junkfilter" removed in 86dc90056dfd */ + state->current_rri->ri_RowIdAttNo, +#else + state->junkfilter->jf_junkAttNo, +#endif + &ctid_isnull); + + /* shouldn't ever get a null result... */ + if (ctid_isnull) + elog(ERROR, "ctid is NULL"); + + /* Get item pointer to tuple */ + return *(ItemPointer) DatumGetPointer(ctid_datum); + } + else if (relkind == RELKIND_FOREIGN_TABLE) + elog(ERROR, UPDATE_NODE_NAME " does not support foreign tables"); + else + elog(ERROR, UPDATE_NODE_NAME " cannot handle relkind %u", relkind); + return *(ItemPointer) NULL; /* keep compiler quiet, lol */ +} + +/* This is a heavily modified copy of ExecDelete from nodeModifyTable.c */ +static TupleTableSlot * +router_lock_or_delete_tuple(PartitionRouterState *state, + TupleTableSlot *slot, + ItemPointer tupleid, + bool *deleted /* return value #1 */) +{ + ResultRelInfo *rri; + Relation rel; + + EState *estate = state->css.ss.ps.state; + ExprContext *econtext = GetPerTupleExprContext(estate); + ExprState *constraint = state->constraint; + + /* Maintaining both >= 12 and earlier is quite horrible there, you know */ +#if PG_VERSION_NUM >= 120000 + TM_FailureData tmfd; + TM_Result result; +#else + HeapUpdateFailureData tmfd; + HTSU_Result result; +#endif + + EPQState *epqstate = &state->epqstate; + + LOCKMODE lockmode; + bool try_delete; + + *deleted = false; + + EvalPlanQualSetSlot(epqstate, slot); + + /* Get information on the (current) result relation */ + rri = estate->es_result_relation_info; + rel = rri->ri_RelationDesc; + lockmode = ExecUpdateLockMode(estate, rri); + +recheck: + /* Does tuple still belong to current partition? 
*/ + econtext->ecxt_scantuple = slot; + try_delete = !ExecCheck(constraint, econtext); + + /* Lock or delete tuple */ + if (try_delete) + { + /* BEFORE ROW UPDATE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_update_before_row) + { +#if PG_VERSION_NUM >= 120000 + if (!ExecBRUpdateTriggersCompat(estate, epqstate, rri, tupleid, NULL, slot)) + return NULL; +#else + slot = ExecBRUpdateTriggers(estate, epqstate, rri, tupleid, NULL, slot); + if (TupIsNull(slot)) + return NULL; +#endif + } + + /* BEFORE ROW DELETE triggers */ + if (rri->ri_TrigDesc && + rri->ri_TrigDesc->trig_delete_before_row) + { + if (!ExecBRDeleteTriggersCompat(estate, epqstate, rri, tupleid, NULL, NULL)) + return NULL; + } + + /* Delete the tuple */ + result = heap_delete_compat(rel, tupleid, + estate->es_output_cid, + estate->es_crosscheck_snapshot, + true /* wait for commit */, &tmfd, + true /* changing partition */); + } + else + { + HeapTupleData tuple; + Buffer buffer; + + tuple.t_self = *tupleid; + /* xxx why we ever need this? 
*/ + result = heap_lock_tuple(rel, &tuple, + estate->es_output_cid, + lockmode, LockWaitBlock, + false, &buffer, &tmfd); + + ReleaseBuffer(buffer); + } + + /* Check lock/delete status */ + switch (result) + { +#if PG_VERSION_NUM >= 120000 + case TM_SelfModified: +#else + case HeapTupleSelfUpdated: +#endif + if (tmfd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be updated was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + + /* Already deleted by self; nothing to do */ + return NULL; + +#if PG_VERSION_NUM >= 120000 + case TM_Ok: +#else + case HeapTupleMayBeUpdated: +#endif + break; + +#if PG_VERSION_NUM >= 120000 /* TM_Deleted/TM_Updated */ + case TM_Updated: + { + /* not sure this stuff is correct at all */ + TupleTableSlot *inputslot; + TupleTableSlot *epqslot; + + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + + /* + * Already know that we're going to need to do EPQ, so + * fetch tuple directly into the right slot. + */ + inputslot = EvalPlanQualSlot(epqstate, rel, rri->ri_RangeTableIndex); + + result = table_tuple_lock(rel, tupleid, + estate->es_snapshot, + inputslot, estate->es_output_cid, + LockTupleExclusive, LockWaitBlock, + TUPLE_LOCK_FLAG_FIND_LAST_VERSION, + &tmfd); + + switch (result) + { + case TM_Ok: + Assert(tmfd.traversed); + epqslot = EvalPlanQual(epqstate, + rel, + rri->ri_RangeTableIndex, + inputslot); + if (TupIsNull(epqslot)) + /* Tuple not passing quals anymore, exiting... 
*/ + return NULL; + + /* just copied from below, ha */ + *tupleid = tmfd.ctid; + slot = epqslot; + goto recheck; + + case TM_SelfModified: + + /* + * This can be reached when following an update + * chain from a tuple updated by another session, + * reaching a tuple that was already updated in + * this transaction. If previously updated by this + * command, ignore the delete, otherwise error + * out. + * + * See also TM_SelfModified response to + * table_tuple_delete() above. + */ + if (tmfd.cmax != estate->es_output_cid) + ereport(ERROR, + (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), + errmsg("tuple to be deleted was already modified by an operation triggered by the current command"), + errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); + return NULL; + + case TM_Deleted: + /* tuple already deleted; nothing to do */ + return NULL; + + default: + + /* + * TM_Invisible should be impossible because we're + * waiting for updated row versions, and would + * already have errored out if the first version + * is invisible. + * + * TM_Updated should be impossible, because we're + * locking the latest version via + * TUPLE_LOCK_FLAG_FIND_LAST_VERSION. 
+ */ + elog(ERROR, "unexpected table_tuple_lock status: %u", + result); + return NULL; + } + + Assert(false); + break; + } + + + case TM_Deleted: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent delete"))); + /* tuple already deleted; nothing to do */ + return NULL; + +#else + case HeapTupleUpdated: + if (IsolationUsesXactSnapshot()) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("could not serialize access due to concurrent update"))); + if (ItemPointerIndicatesMovedPartitions(&tmfd.ctid)) + ereport(ERROR, + (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), + errmsg("tuple to be updated was already moved to another partition due to concurrent update"))); + + if (!ItemPointerEquals(tupleid, &tmfd.ctid)) + { + TupleTableSlot *epqslot; + + epqslot = EvalPlanQual(estate, + epqstate, + rel, + rri->ri_RangeTableIndex, + LockTupleExclusive, + &tmfd.ctid, + tmfd.xmax); + + if (!TupIsNull(epqslot)) + { + Assert(tupleid != NULL); + *tupleid = tmfd.ctid; + slot = epqslot; + goto recheck; + } + } + + /* Tuple already deleted; nothing to do */ + return NULL; +#endif /* TM_Deleted/TM_Updated */ + +#if PG_VERSION_NUM >= 120000 + case TM_Invisible: +#else + case HeapTupleInvisible: +#endif + elog(ERROR, "attempted to lock invisible tuple"); + break; + + default: + elog(ERROR, "unrecognized heap_delete status: %u", result); + break; + } + + /* Additional work for delete s*/ + if (try_delete) + { + /* AFTER ROW DELETE triggers */ + ExecARDeleteTriggersCompat(estate, rri, tupleid, NULL, NULL); + } + + *deleted = try_delete; + return slot; +} diff --git a/src/pathman_workers.c b/src/pathman_workers.c index e3bb7bf5..bf23bd94 100644 --- a/src/pathman_workers.c +++ b/src/pathman_workers.c @@ -57,8 +57,8 @@ extern PGDLLEXPORT void bgw_main_concurrent_part(Datum main_arg); static void handle_sigterm(SIGNAL_ARGS); static void bg_worker_load_config(const char 
*bgw_name); -static void start_bg_worker(const char bgworker_name[BGW_MAXLEN], - const char bgworker_proc[BGW_MAXLEN], +static bool start_bgworker(const char *bgworker_name, + const char *bgworker_proc, Datum bgw_arg, bool wait_for_shutdown); @@ -84,12 +84,16 @@ static const char *spawn_partitions_bgw = "SpawnPartitionsWorker"; static const char *concurrent_part_bgw = "ConcurrentPartWorker"; +/* Used for preventing spawn bgw recursion trouble */ +static bool am_spawn_bgw = false; + /* * Estimate amount of shmem needed for concurrent partitioning. */ Size estimate_concurrent_part_task_slots_size(void) { + /* NOTE: we suggest that max_worker_processes is in PGC_POSTMASTER */ return sizeof(ConcurrentPartSlot) * PART_WORKER_SLOTS; } @@ -125,6 +129,7 @@ init_concurrent_part_task_slots(void) /* * Handle SIGTERM in BGW's process. + * Use it in favor of bgworker_die(). */ static void handle_sigterm(SIGNAL_ARGS) @@ -160,9 +165,9 @@ bg_worker_load_config(const char *bgw_name) /* * Common function to start background worker. 
*/ -static void -start_bg_worker(const char bgworker_name[BGW_MAXLEN], - const char bgworker_proc[BGW_MAXLEN], +static bool +start_bgworker(const char *bgworker_name, + const char *bgworker_proc, Datum bgw_arg, bool wait_for_shutdown) { #define HandleError(condition, new_state) \ @@ -183,17 +188,19 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN], pid_t pid; /* Initialize worker struct */ - memcpy(worker.bgw_name, bgworker_name, BGW_MAXLEN); - memcpy(worker.bgw_function_name, bgworker_proc, BGW_MAXLEN); - memcpy(worker.bgw_library_name, "pg_pathman", BGW_MAXLEN); + memset(&worker, 0, sizeof(worker)); + + snprintf(worker.bgw_name, BGW_MAXLEN, "%s", bgworker_name); + snprintf(worker.bgw_function_name, BGW_MAXLEN, "%s", bgworker_proc); + snprintf(worker.bgw_library_name, BGW_MAXLEN, "pg_pathman"); worker.bgw_flags = BGWORKER_SHMEM_ACCESS | +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 130000 && PG_VERSION_NUM < 140000 /* FIXME: need to remove last condition in future */ + BGWORKER_CLASS_PERSISTENT | +#endif BGWORKER_BACKEND_DATABASE_CONNECTION; worker.bgw_start_time = BgWorkerStart_RecoveryFinished; worker.bgw_restart_time = BGW_NEVER_RESTART; -#if PG_VERSION_NUM < 100000 - worker.bgw_main = NULL; -#endif worker.bgw_main_arg = bgw_arg; worker.bgw_notify_pid = MyProcPid; @@ -218,10 +225,9 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN], switch (exec_state) { + /* Caller might want to handle this case */ case BGW_COULD_NOT_START: - elog(ERROR, "Unable to create background %s for pg_pathman", - bgworker_name); - break; + return false; case BGW_PM_DIED: ereport(ERROR, @@ -232,6 +238,18 @@ start_bg_worker(const char bgworker_name[BGW_MAXLEN], default: break; } + + return true; +} + +/* + * Show generic error message if we failed to start bgworker. 
+ */ +static inline void +start_bgworker_errmsg(const char *bgworker_name) +{ + ereport(ERROR, (errmsg("could not start %s", bgworker_name), + errhint("consider increasing max_worker_processes"))); } @@ -300,6 +318,11 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) SpawnPartitionArgs *bgw_args; Oid child_oid = InvalidOid; + if (am_spawn_bgw) + ereport(ERROR, + (errmsg("Attempt to spawn partition using bgw from bgw spawning partitions"), + errhint("Probably init_callback has INSERT to its table?"))); + /* Create a dsm segment for the worker to pass arguments */ segment = create_partitions_bg_worker_segment(relid, value, value_type); segment_handle = dsm_segment_handle(segment); @@ -311,10 +334,13 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) #endif /* Start worker and wait for it to finish */ - start_bg_worker(spawn_partitions_bgw, - CppAsString(bgw_main_spawn_partitions), - UInt32GetDatum(segment_handle), - true); + if (!start_bgworker(spawn_partitions_bgw, + CppAsString(bgw_main_spawn_partitions), + UInt32GetDatum(segment_handle), + true)) + { + start_bgworker_errmsg(spawn_partitions_bgw); + } /* Save the result (partition Oid) */ child_oid = bgw_args->result; @@ -324,7 +350,7 @@ create_partitions_for_value_bg_worker(Oid relid, Datum value, Oid value_type) if (child_oid == InvalidOid) ereport(ERROR, - (errmsg("Attempt to spawn new partitions of relation \"%s\" failed", + (errmsg("attempt to spawn new partitions of relation \"%s\" failed", get_rel_name_or_relid(relid)), errhint("See server log for more details."))); @@ -341,6 +367,7 @@ bgw_main_spawn_partitions(Datum main_arg) dsm_segment *segment; SpawnPartitionArgs *args; Datum value; + Oid result; /* Establish signal handlers before unblocking signals. 
*/ pqsignal(SIGTERM, handle_sigterm); @@ -348,6 +375,8 @@ bgw_main_spawn_partitions(Datum main_arg) /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); + am_spawn_bgw = true; + /* Create resource owner */ CurrentResourceOwner = ResourceOwnerCreate(NULL, spawn_partitions_bgw); @@ -369,7 +398,7 @@ bgw_main_spawn_partitions(Datum main_arg) #endif /* Establish connection and start transaction */ - BackgroundWorkerInitializeConnectionByOid(args->dbid, args->userid); + BackgroundWorkerInitializeConnectionByOidCompat(args->dbid, args->userid); /* Start new transaction (syscache access etc.) */ StartTransactionCommand(); @@ -390,18 +419,17 @@ bgw_main_spawn_partitions(Datum main_arg) DebugPrintDatum(value, args->value_type), MyProcPid); #endif - /* Create partitions and save the Oid of the last one */ - args->result = create_partitions_for_value_internal(args->partitioned_table, - value, /* unpacked Datum */ - args->value_type, - true); /* background woker */ + /* + * Create partitions and save the Oid of the last one. + * If we fail here, args->result is 0 since it is zeroed on initialization. + */ + result = create_partitions_for_value_internal(args->partitioned_table, + value, /* unpacked Datum */ + args->value_type); /* Finish transaction in an appropriate way */ - if (args->result == InvalidOid) - AbortCurrentTransaction(); - else - CommitTransactionCommand(); - + CommitTransactionCommand(); + args->result = result; dsm_detach(segment); } @@ -412,19 +440,36 @@ bgw_main_spawn_partitions(Datum main_arg) * ------------------------------------- */ +/* Free bgworker's CPS slot */ +static void +free_cps_slot(int code, Datum arg) +{ + ConcurrentPartSlot *part_slot = (ConcurrentPartSlot *) DatumGetPointer(arg); + + cps_set_status(part_slot, CPS_FREE); +} + /* * Entry point for ConcurrentPartWorker's process. 
*/ void bgw_main_concurrent_part(Datum main_arg) { - int rows; - bool failed; - int failures_count = 0; - char *sql = NULL; ConcurrentPartSlot *part_slot; + char *sql = NULL; + int64 rows; + volatile bool failed; + volatile int failures_count = 0; + LOCKMODE lockmode = RowExclusiveLock; - /* Establish signal handlers before unblocking signals. */ + /* Update concurrent part slot */ + part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)]; + part_slot->pid = MyProcPid; + + /* Establish atexit callback that will fre CPS slot */ + on_proc_exit(free_cps_slot, PointerGetDatum(part_slot)); + + /* Establish signal handlers before unblocking signals */ pqsignal(SIGTERM, handle_sigterm); /* We're now ready to receive signals */ @@ -433,15 +478,11 @@ bgw_main_concurrent_part(Datum main_arg) /* Create resource owner */ CurrentResourceOwner = ResourceOwnerCreate(NULL, concurrent_part_bgw); - /* Update concurrent part slot */ - part_slot = &concurrent_part_slots[DatumGetInt32(main_arg)]; - part_slot->pid = MyProcPid; - /* Disable auto partition propagation */ SetAutoPartitionEnabled(false); /* Establish connection and start transaction */ - BackgroundWorkerInitializeConnectionByOid(part_slot->dbid, part_slot->userid); + BackgroundWorkerInitializeConnectionByOidCompat(part_slot->dbid, part_slot->userid); /* Initialize pg_pathman's local config */ StartTransactionCommand(); @@ -451,16 +492,19 @@ bgw_main_concurrent_part(Datum main_arg) /* Do the job */ do { - MemoryContext old_mcxt; + MemoryContext old_mcxt; Oid types[2] = { OIDOID, INT4OID }; Datum vals[2] = { part_slot->relid, part_slot->batch_size }; - bool nulls[2] = { false, false }; + + volatile bool rel_locked = false; /* Reset loop variables */ failed = false; rows = 0; + CHECK_FOR_INTERRUPTS(); + /* Start new transaction (syscache access etc.) 
*/ StartTransactionCommand(); @@ -476,80 +520,109 @@ bgw_main_concurrent_part(Datum main_arg) if (sql == NULL) { MemoryContext current_mcxt; + char *pathman_schema; + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* * Allocate SQL query in TopPathmanContext because current * context will be destroyed after transaction finishes */ current_mcxt = MemoryContextSwitchTo(TopPathmanContext); - sql = psprintf("SELECT %s._partition_data_concurrent($1::oid, p_limit:=$2)", - get_namespace_name(get_pathman_schema())); + sql = psprintf("SELECT %s._partition_data_concurrent($1::regclass, NULL::text, NULL::text, p_limit:=$2)", + pathman_schema); MemoryContextSwitchTo(current_mcxt); } /* Exec ret = _partition_data_concurrent() */ PG_TRY(); { - /* Make sure that relation exists and has partitions */ - if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid)) && - get_pathman_relation_info(part_slot->relid) != NULL) - { - int ret; - bool isnull; + int ret; + bool isnull; - ret = SPI_execute_with_args(sql, 2, types, vals, nulls, false, 0); - if (ret == SPI_OK_SELECT) - { - TupleDesc tupdesc = SPI_tuptable->tupdesc; - HeapTuple tuple = SPI_tuptable->vals[0]; + /* Lock relation for DELETE and INSERT */ + if (!ConditionalLockRelationOid(part_slot->relid, lockmode)) + { + elog(ERROR, "could not take lock on relation %u", part_slot->relid); + } - Assert(SPI_processed == 1); /* there should be 1 result at most */ + /* Great, now relation is locked */ + rel_locked = true; - rows = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + /* Make sure that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(part_slot->relid))) + { + /* Exit after we raise ERROR */ + failures_count = PART_WORKER_MAX_ATTEMPTS; - Assert(!isnull); /* ... 
and ofc it must not be NULL */ - } + elog(ERROR, "relation %u does not exist", part_slot->relid); } - /* Otherwise it's time to exit */ - else + + /* Make sure that relation has partitions */ + if (!has_pathman_relation_info(part_slot->relid)) { + /* Exit after we raise ERROR */ failures_count = PART_WORKER_MAX_ATTEMPTS; - elog(LOG, "relation \"%u\" is not partitioned (or does not exist)", - part_slot->relid); + elog(ERROR, "relation \"%s\" is not partitioned", + get_rel_name(part_slot->relid)); + } + + /* Call concurrent partitioning function */ + ret = SPI_execute_with_args(sql, 2, types, vals, NULL, false, 0); + if (ret == SPI_OK_SELECT) + { + TupleDesc tupdesc = SPI_tuptable->tupdesc; + HeapTuple tuple = SPI_tuptable->vals[0]; + + /* There should be 1 result at most */ + Assert(SPI_processed == 1); + + /* Extract number of processed rows */ + rows = DatumGetInt64(SPI_getbinval(tuple, tupdesc, 1, &isnull)); + Assert(TupleDescAttr(tupdesc, 0)->atttypid == INT8OID); /* check type */ + Assert(!isnull); /* ... and ofc it must not be NULL */ } + /* Else raise generic error */ + else elog(ERROR, "partitioning function returned %u", ret); + + /* Finally, unlock our partitioned table */ + UnlockRelationOid(part_slot->relid, lockmode); } PG_CATCH(); { /* * The most common exception we can catch here is a deadlock with * concurrent user queries. Check that attempts count doesn't exceed - * some reasonable value + * some reasonable value. 
*/ - ErrorData *error; - char *sleep_time_str; + ErrorData *error; + + /* Unlock relation if we caught ERROR too early */ + if (rel_locked) + UnlockRelationOid(part_slot->relid, lockmode); + + /* Increase number of failures and set 'failed' status */ + failures_count++; + failed = true; /* Switch to the original context & copy edata */ MemoryContextSwitchTo(old_mcxt); error = CopyErrorData(); FlushErrorState(); - /* Print messsage for this BGWorker to server log */ - sleep_time_str = datum_to_cstring(Float8GetDatum(part_slot->sleep_time), - FLOAT8OID); - failures_count++; + /* Print message for this BGWorker to server log */ ereport(LOG, (errmsg("%s: %s", concurrent_part_bgw, error->message), - errdetail("attempt: %d/%d, sleep time: %s", + errdetail("attempt: %d/%d, sleep time: %.2f", failures_count, PART_WORKER_MAX_ATTEMPTS, - sleep_time_str))); - pfree(sleep_time_str); /* free the time string */ + (float) part_slot->sleep_time))); + /* Finally, free error data */ FreeErrorData(error); - - /* Set 'failed' flag */ - failed = true; } PG_END_TRY(); @@ -576,9 +649,10 @@ bgw_main_concurrent_part(Datum main_arg) /* Failed this time, wait */ else if (failed) { - /* Abort transaction and sleep for a second */ + /* Abort transaction */ AbortCurrentTransaction(); + /* Sleep for a specified amount of time (default 1s) */ DirectFunctionCall1(pg_sleep, Float8GetDatum(part_slot->sleep_time)); } @@ -592,12 +666,15 @@ bgw_main_concurrent_part(Datum main_arg) /* Add rows to total_rows */ SpinLockAcquire(&part_slot->mutex); part_slot->total_rows += rows; -/* Report debug message */ + SpinLockRelease(&part_slot->mutex); + #ifdef USE_ASSERT_CHECKING - elog(DEBUG1, "%s: relocated %d rows, total: " UINT64_FORMAT " [%u]", - concurrent_part_bgw, rows, part_slot->total_rows, MyProcPid); + /* Report debug message */ + elog(DEBUG1, "%s: " + "relocated" INT64_FORMAT "rows, " + "total: " INT64_FORMAT, + concurrent_part_bgw, rows, part_slot->total_rows); #endif - 
SpinLockRelease(&part_slot->mutex); } /* If other backend requested to stop us, quit */ @@ -605,12 +682,6 @@ bgw_main_concurrent_part(Datum main_arg) break; } while(rows > 0 || failed); /* do while there's still rows to be relocated */ - - /* Reclaim the resources */ - pfree(sql); - - /* Mark slot as FREE */ - cps_set_status(part_slot, CPS_FREE); } @@ -633,6 +704,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS) int empty_slot_idx = -1, /* do we have a slot for BGWorker? */ i; TransactionId rel_xmin; + LOCKMODE lockmode = ShareUpdateExclusiveLock; + char *pathman_schema; /* Check batch_size */ if (batch_size < 1 || batch_size > 10000) @@ -645,12 +718,14 @@ partition_table_concurrently(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'sleep_time' should not be less than 0.5"))); + check_relation_oid(relid); + + /* Prevent concurrent function calls */ + LockRelationOid(relid, lockmode); + /* Check if relation is a partitioned table */ - shout_if_prel_is_invalid(relid, - /* We also lock the parent relation */ - get_pathman_relation_info_after_lock(relid, true, NULL), - /* Partitioning type does not matter here */ - PT_ANY); + if (!has_pathman_relation_info(relid)) + shout_if_prel_is_invalid(relid, NULL, PT_ANY); /* Check that partitioning operation result is visible */ if (pathman_config_contains_relation(relid, NULL, NULL, &rel_xmin, NULL)) @@ -665,7 +740,7 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* * Look for an empty slot and also check that a concurrent - * partitioning operation for this table hasn't been started yet + * partitioning operation for this table hasn't started yet. 
*/ for (i = 0; i < PART_WORKER_SLOTS; i++) { @@ -694,9 +769,8 @@ partition_table_concurrently(PG_FUNCTION_ARGS) if (empty_slot_idx >= 0 && empty_slot_idx != i) SpinLockRelease(&concurrent_part_slots[empty_slot_idx].mutex); - elog(ERROR, - "table \"%s\" is already being partitioned", - get_rel_name(relid)); + ereport(ERROR, (errmsg("table \"%s\" is already being partitioned", + get_rel_name(relid)))); } /* Normally we don't want to keep it */ @@ -706,7 +780,9 @@ partition_table_concurrently(PG_FUNCTION_ARGS) /* Looks like we could not find an empty slot */ if (empty_slot_idx < 0) - elog(ERROR, "no empty worker slots found"); + ereport(ERROR, (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED), + errmsg("no empty worker slots found"), + errhint("consider increasing max_worker_processes"))); else { /* Initialize concurrent part slot */ @@ -719,19 +795,32 @@ partition_table_concurrently(PG_FUNCTION_ARGS) } /* Start worker (we should not wait) */ - start_bg_worker(concurrent_part_bgw, - CppAsString(bgw_main_concurrent_part), - Int32GetDatum(empty_slot_idx), - false); + if (!start_bgworker(concurrent_part_bgw, + CppAsString(bgw_main_concurrent_part), + Int32GetDatum(empty_slot_idx), + false)) + { + /* Couldn't start, free CPS slot */ + cps_set_status(&concurrent_part_slots[empty_slot_idx], CPS_FREE); + + start_bgworker_errmsg(concurrent_part_bgw); + } + + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); /* Tell user everything's fine */ elog(NOTICE, "worker started, you can stop it " "with the following command: select %s.%s('%s');", - get_namespace_name(get_pathman_schema()), + pathman_schema, CppAsString(stop_concurrent_part_task), get_rel_name(relid)); + /* We don't need this lock anymore */ + UnlockRelationOid(relid, lockmode); + PG_RETURN_VOID(); } @@ -762,7 +851,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) userctx->cur_idx = 0; /* Create tuple descriptor */ - 
tupdesc = CreateTemplateTupleDesc(Natts_pathman_cp_tasks, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_cp_tasks, false); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_userid, "userid", REGROLEOID, -1, 0); @@ -773,7 +862,7 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_relid, "relid", REGCLASSOID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_processed, - "processed", INT4OID, -1, 0); + "processed", INT8OID, -1, 0); TupleDescInitEntry(tupdesc, Anum_pathman_cp_tasks_status, "status", TEXTOID, -1, 0); @@ -789,40 +878,32 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) /* Iterate through worker slots */ for (i = userctx->cur_idx; i < PART_WORKER_SLOTS; i++) { - ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i]; + ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i], + slot_copy; HeapTuple htup = NULL; - HOLD_INTERRUPTS(); + /* Copy slot to process local memory */ SpinLockAcquire(&cur_slot->mutex); + memcpy(&slot_copy, cur_slot, sizeof(ConcurrentPartSlot)); + SpinLockRelease(&cur_slot->mutex); - if (cur_slot->worker_status != CPS_FREE) + if (slot_copy.worker_status != CPS_FREE) { Datum values[Natts_pathman_cp_tasks]; bool isnull[Natts_pathman_cp_tasks] = { 0 }; - values[Anum_pathman_cp_tasks_userid - 1] = cur_slot->userid; - values[Anum_pathman_cp_tasks_pid - 1] = cur_slot->pid; - values[Anum_pathman_cp_tasks_dbid - 1] = cur_slot->dbid; - values[Anum_pathman_cp_tasks_relid - 1] = cur_slot->relid; - values[Anum_pathman_cp_tasks_processed - 1] = cur_slot->total_rows; + values[Anum_pathman_cp_tasks_userid - 1] = slot_copy.userid; + values[Anum_pathman_cp_tasks_pid - 1] = slot_copy.pid; + values[Anum_pathman_cp_tasks_dbid - 1] = slot_copy.dbid; + values[Anum_pathman_cp_tasks_relid - 1] = slot_copy.relid; + + /* Record processed rows */ + values[Anum_pathman_cp_tasks_processed - 1] = + Int64GetDatum(slot_copy.total_rows); /* Now build a status string */ - 
switch(cur_slot->worker_status) - { - case CPS_WORKING: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("working")); - break; - - case CPS_STOPPING: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("stopping")); - break; - - default: - values[Anum_pathman_cp_tasks_status - 1] = - PointerGetDatum(cstring_to_text("[unknown]")); - } + values[Anum_pathman_cp_tasks_status - 1] = + CStringGetTextDatum(cps_print_status(slot_copy.worker_status)); /* Form output tuple */ htup = heap_form_tuple(funcctx->tuple_desc, values, isnull); @@ -831,9 +912,6 @@ show_concurrent_part_tasks_internal(PG_FUNCTION_ARGS) userctx->cur_idx = i + 1; } - SpinLockRelease(&cur_slot->mutex); - RESUME_INTERRUPTS(); - /* Return tuple if needed */ if (htup) SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(htup)); @@ -857,26 +935,25 @@ stop_concurrent_part_task(PG_FUNCTION_ARGS) { ConcurrentPartSlot *cur_slot = &concurrent_part_slots[i]; - HOLD_INTERRUPTS(); SpinLockAcquire(&cur_slot->mutex); if (cur_slot->worker_status != CPS_FREE && cur_slot->relid == relid && cur_slot->dbid == MyDatabaseId) { - elog(NOTICE, "worker will stop after it finishes current batch"); - /* Change worker's state & set 'worker_found' */ cur_slot->worker_status = CPS_STOPPING; worker_found = true; } SpinLockRelease(&cur_slot->mutex); - RESUME_INTERRUPTS(); } if (worker_found) + { + elog(NOTICE, "worker will stop after it finishes current batch"); PG_RETURN_BOOL(true); + } else { elog(ERROR, "cannot find worker for relation \"%s\"", diff --git a/src/pg_pathman.c b/src/pg_pathman.c index 1580bb22..6e835a1f 100644 --- a/src/pg_pathman.c +++ b/src/pg_pathman.c @@ -4,34 +4,51 @@ * This module sets planner hooks, handles SELECT queries and produces * paths for partitioned tables * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2021, Postgres Professional * * ------------------------------------------------------------------------ */ -#include 
"compat/expand_rte_hook.h" #include "compat/pg_compat.h" +#include "compat/rowmarks_fix.h" #include "init.h" #include "hooks.h" #include "pathman.h" #include "partition_filter.h" -#include "runtimeappend.h" +#include "partition_router.h" +#include "partition_overseer.h" +#include "planner_tree_modification.h" +#include "runtime_append.h" #include "runtime_merge_append.h" #include "postgres.h" +#include "access/genam.h" +#include "access/htup_details.h" #include "access/sysattr.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif +#include "access/xact.h" +#include "catalog/pg_collation.h" +#include "catalog/indexing.h" #include "catalog/pg_type.h" +#include "catalog/pg_extension.h" +#include "commands/extension.h" #include "foreign/fdwapi.h" #include "miscadmin.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#endif #include "optimizer/clauses.h" #include "optimizer/plancat.h" -#include "optimizer/prep.h" #include "optimizer/restrictinfo.h" #include "optimizer/cost.h" #include "utils/datum.h" -#include "utils/lsyscache.h" +#include "utils/fmgroids.h" #include "utils/rel.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" #include "utils/selfuncs.h" #include "utils/typcache.h" @@ -39,8 +56,8 @@ PG_MODULE_MAGIC; -Oid pathman_config_relid = InvalidOid, - pathman_config_params_relid = InvalidOid; +Oid pathman_config_relid = InvalidOid, + pathman_config_params_relid = InvalidOid; /* pg module functions */ @@ -75,10 +92,6 @@ static void handle_opexpr(const OpExpr *expr, const WalkerContext *context, WrapperNode *result); -static bool is_key_op_param(const OpExpr *expr, - const WalkerContext *context, - Node **param_ptr); - static Datum array_find_min_max(Datum *values, bool *isnull, int length, @@ -198,8 +211,7 @@ ExtractConst(Node *node, const WalkerContext *context) /* Evaluate expression */ estate = ExecInitExpr((Expr *) node, NULL); - value = ExecEvalExprCompat(estate, econtext, &isnull, - mult_result_handler); + value = 
ExecEvalExprCompat(estate, econtext, &isnull); #if PG_VERSION_NUM >= 100000 /* Free temp econtext if needed */ @@ -212,6 +224,47 @@ ExtractConst(Node *node, const WalkerContext *context) value, isnull, get_typbyval(typid)); } +/* + * Checks if expression is a KEY OP PARAM or PARAM OP KEY, + * where KEY is partitioning expression and PARAM is whatever. + * + * Returns: + * operator's Oid if KEY is a partitioning expr, + * otherwise InvalidOid. + */ +static Oid +IsKeyOpParam(const OpExpr *expr, + const WalkerContext *context, + Node **param_ptr) /* ret value #1 */ +{ + Node *left = linitial(expr->args), + *right = lsecond(expr->args); + + /* Check number of arguments */ + if (list_length(expr->args) != 2) + return InvalidOid; + + /* KEY OP PARAM */ + if (match_expr_to_operand(context->prel_expr, left)) + { + *param_ptr = right; + + /* return the same operator */ + return expr->opno; + } + + /* PARAM OP KEY */ + if (match_expr_to_operand(context->prel_expr, right)) + { + *param_ptr = left; + + /* commute to (KEY OP PARAM) */ + return get_commutator(expr->opno); + } + + return InvalidOid; +} + /* Selectivity estimator for common 'paramsel' */ static inline double estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) @@ -228,6 +281,32 @@ estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) else return 1.0; } +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 100000 +/* + * Reset cache at start and at finish ATX transaction + */ +static void +pathman_xact_cb(XactEvent event, void *arg) +{ + if (getNestLevelATX() > 0) + { + /* + * For each ATX transaction start/finish: need to reset pg_pathman + * cache because we shouldn't see uncommitted data in autonomous + * transaction and data of autonomous transaction in main transaction + */ + if ((event == XACT_EVENT_START /* start */) || + (event == XACT_EVENT_ABORT || + event == XACT_EVENT_PARALLEL_ABORT || + event == XACT_EVENT_COMMIT || + event == XACT_EVENT_PARALLEL_COMMIT || + event == 
XACT_EVENT_PREPARE /* finish */)) + { + pathman_relcache_hook(PointerGetDatum(NULL), InvalidOid); + } + } +} +#endif /* * ------------------- @@ -235,12 +314,15 @@ estimate_paramsel_using_prel(const PartRelationInfo *prel, int strategy) * ------------------- */ +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ +static shmem_request_hook_type prev_shmem_request_hook = NULL; +static void pg_pathman_shmem_request(void); +#endif + /* Set initial values for all Postmaster's forks */ void _PG_init(void) { - PathmanInitState temp_init_state; - if (!process_shared_preload_libraries_in_progress) { elog(ERROR, "pg_pathman module must be initialized by Postmaster. " @@ -249,53 +331,76 @@ _PG_init(void) } /* Request additional shared resources */ +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ + prev_shmem_request_hook = shmem_request_hook; + shmem_request_hook = pg_pathman_shmem_request; +#else RequestAddinShmemSpace(estimate_pathman_shmem_size()); +#endif /* Assign pg_pathman's initial state */ - temp_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; - temp_init_state.auto_partition = DEFAULT_PATHMAN_AUTO; - temp_init_state.override_copy = DEFAULT_PATHMAN_OVERRIDE_COPY; - temp_init_state.initialization_needed = true; /* ofc it's needed! */ - - /* Apply initial state */ - restore_pathman_init_state(&temp_init_state); + pathman_init_state.pg_pathman_enable = DEFAULT_PATHMAN_ENABLE; + pathman_init_state.auto_partition = DEFAULT_PATHMAN_AUTO; + pathman_init_state.override_copy = DEFAULT_PATHMAN_OVERRIDE_COPY; + pathman_init_state.initialization_needed = true; /* ofc it's needed! 
*/ /* Set basic hooks */ - set_rel_pathlist_hook_next = set_rel_pathlist_hook; - set_rel_pathlist_hook = pathman_rel_pathlist_hook; - set_join_pathlist_next = set_join_pathlist_hook; - set_join_pathlist_hook = pathman_join_pathlist_hook; - shmem_startup_hook_next = shmem_startup_hook; - shmem_startup_hook = pathman_shmem_startup_hook; - post_parse_analyze_hook_next = post_parse_analyze_hook; - post_parse_analyze_hook = pathman_post_parse_analysis_hook; - planner_hook_next = planner_hook; - planner_hook = pathman_planner_hook; - process_utility_hook_next = ProcessUtility_hook; - ProcessUtility_hook = pathman_process_utility_hook; - - /* Initialize PgPro-specific subsystems */ - init_expand_rte_hook(); + pathman_set_rel_pathlist_hook_next = set_rel_pathlist_hook; + set_rel_pathlist_hook = pathman_rel_pathlist_hook; + pathman_set_join_pathlist_next = set_join_pathlist_hook; + set_join_pathlist_hook = pathman_join_pathlist_hook; + pathman_shmem_startup_hook_next = shmem_startup_hook; + shmem_startup_hook = pathman_shmem_startup_hook; + pathman_post_parse_analyze_hook_next = post_parse_analyze_hook; + post_parse_analyze_hook = pathman_post_parse_analyze_hook; + pathman_planner_hook_next = planner_hook; + planner_hook = pathman_planner_hook; + pathman_process_utility_hook_next = ProcessUtility_hook; + ProcessUtility_hook = pathman_process_utility_hook; + pathman_executor_start_hook_prev = ExecutorStart_hook; + ExecutorStart_hook = pathman_executor_start_hook; /* Initialize static data for all subsystems */ init_main_pathman_toggles(); init_relation_info_static_data(); - init_runtimeappend_static_data(); + init_runtime_append_static_data(); init_runtime_merge_append_static_data(); init_partition_filter_static_data(); + init_partition_router_static_data(); + init_partition_overseer_static_data(); + +#ifdef PGPRO_EE + /* Callbacks for reload relcache for ATX transactions */ + PgproRegisterXactCallback(pathman_xact_cb, NULL, XACT_EVENT_KIND_VANILLA | XACT_EVENT_KIND_ATX); 
+#endif } +#if PG_VERSION_NUM >= 150000 /* for commit 4f2400cb3f10 */ +static void +pg_pathman_shmem_request(void) +{ + if (prev_shmem_request_hook) + prev_shmem_request_hook(); + + RequestAddinShmemSpace(estimate_pathman_shmem_size()); +} +#endif + /* Get cached PATHMAN_CONFIG relation Oid */ Oid get_pathman_config_relid(bool invalid_is_ok) { + if (!IsPathmanInitialized()) + { + if (invalid_is_ok) + return InvalidOid; + elog(ERROR, "pg_pathman is not initialized yet"); + } + /* Raise ERROR if Oid is invalid */ if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) - elog(ERROR, - (!IsPathmanInitialized() ? - "pg_pathman is not initialized yet" : - "unexpected error in function " - CppAsString(get_pathman_config_relid))); + elog(ERROR, "unexpected error in function " + CppAsString(get_pathman_config_relid)); return pathman_config_relid; } @@ -304,17 +409,70 @@ get_pathman_config_relid(bool invalid_is_ok) Oid get_pathman_config_params_relid(bool invalid_is_ok) { + if (!IsPathmanInitialized()) + { + if (invalid_is_ok) + return InvalidOid; + elog(ERROR, "pg_pathman is not initialized yet"); + } + /* Raise ERROR if Oid is invalid */ if (!OidIsValid(pathman_config_relid) && !invalid_is_ok) - elog(ERROR, - (!IsPathmanInitialized() ? - "pg_pathman is not initialized yet" : - "unexpected error in function " - CppAsString(get_pathman_config_params_relid))); + elog(ERROR, "unexpected error in function " + CppAsString(get_pathman_config_params_relid)); return pathman_config_params_relid; } +/* + * Return pg_pathman schema's Oid or InvalidOid if that's not possible. 
+ */ +Oid +get_pathman_schema(void) +{ + Oid result; + Relation rel; + SysScanDesc scandesc; + HeapTuple tuple; + ScanKeyData entry[1]; + Oid ext_oid; + + /* It's impossible to fetch pg_pathman's schema now */ + if (!IsTransactionState()) + return InvalidOid; + + ext_oid = get_extension_oid("pg_pathman", true); + if (ext_oid == InvalidOid) + return InvalidOid; /* exit if pg_pathman does not exist */ + + ScanKeyInit(&entry[0], +#if PG_VERSION_NUM >= 120000 + Anum_pg_extension_oid, +#else + ObjectIdAttributeNumber, +#endif + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(ext_oid)); + + rel = heap_open_compat(ExtensionRelationId, AccessShareLock); + scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, + NULL, 1, entry); + + tuple = systable_getnext(scandesc); + + /* We assume that there can be at most one matching tuple */ + if (HeapTupleIsValid(tuple)) + result = ((Form_pg_extension) GETSTRUCT(tuple))->extnamespace; + else + result = InvalidOid; + + systable_endscan(scandesc); + + heap_close_compat(rel, AccessShareLock); + + return result; +} + /* @@ -330,8 +488,12 @@ get_pathman_config_params_relid(bool invalid_is_ok) * NOTE: partially based on the expand_inherited_rtentry() function. 
*/ Index -append_child_relation(PlannerInfo *root, Relation parent_relation, - Index parent_rti, int ir_index, Oid child_oid, +append_child_relation(PlannerInfo *root, + Relation parent_relation, + PlanRowMark *parent_rowmark, + Index parent_rti, + int ir_index, + Oid child_oid, List *wrappers) { RangeTblEntry *parent_rte, @@ -340,55 +502,188 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, *child_rel; Relation child_relation; AppendRelInfo *appinfo; - Index childRTindex; - PlanRowMark *parent_rowmark, - *child_rowmark; + Index child_rti; + PlanRowMark *child_rowmark = NULL; Node *childqual; List *childquals; ListCell *lc1, *lc2; + LOCKMODE lockmode; +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + TupleDesc child_tupdesc; + List *parent_colnames; + List *child_colnames; +#endif + + /* Choose a correct lock mode */ + if (parent_rti == root->parse->resultRelation) + lockmode = RowExclusiveLock; + else if (parent_rowmark && RowMarkRequiresRowShareLock(parent_rowmark->markType)) + lockmode = RowShareLock; + else + lockmode = AccessShareLock; + + /* Acquire a suitable lock on partition */ + LockRelationOid(child_oid, lockmode); + + /* Check that partition exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(child_oid))) + { + UnlockRelationOid(child_oid, lockmode); + return 0; + } parent_rel = root->simple_rel_array[parent_rti]; + + /* make clang analyzer quiet */ + if (!parent_rel) + elog(ERROR, "parent relation is NULL"); + parent_rte = root->simple_rte_array[parent_rti]; - /* FIXME: acquire a suitable lock on partition */ - child_relation = heap_open(child_oid, NoLock); + /* Open child relation (we've just locked it) */ + child_relation = heap_open_compat(child_oid, NoLock); /* Create RangeTblEntry for child relation */ +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + child_rte = makeNode(RangeTblEntry); + memcpy(child_rte, parent_rte, sizeof(RangeTblEntry)); +#else child_rte = copyObject(parent_rte); +#endif 
child_rte->relid = child_oid; child_rte->relkind = child_relation->rd_rel->relkind; - child_rte->inh = false; /* relation has no children */ - child_rte->requiredPerms = 0; /* perform all checks on parent */ +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* No permission checking for the child RTE */ + child_rte->perminfoindex = 0; +#else + child_rte->requiredPerms = 0; /* perform all checks on parent */ +#endif + child_rte->inh = false; /* Add 'child_rte' to rtable and 'root->simple_rte_array' */ root->parse->rtable = lappend(root->parse->rtable, child_rte); - childRTindex = list_length(root->parse->rtable); - root->simple_rte_array[childRTindex] = child_rte; - - /* Create RelOptInfo for this child (and make some estimates as well) */ - child_rel = build_simple_rel_compat(root, childRTindex, parent_rel); - - /* Increase total_table_pages using the 'child_rel' */ - root->total_table_pages += (double) child_rel->pages; - + child_rti = list_length(root->parse->rtable); + root->simple_rte_array[child_rti] = child_rte; /* Build an AppendRelInfo for this child */ appinfo = makeNode(AppendRelInfo); appinfo->parent_relid = parent_rti; - appinfo->child_relid = childRTindex; + appinfo->child_relid = child_rti; appinfo->parent_reloid = parent_rte->relid; /* Store table row types for wholerow references */ appinfo->parent_reltype = RelationGetDescr(parent_relation)->tdtypeid; appinfo->child_reltype = RelationGetDescr(child_relation)->tdtypeid; - make_inh_translation_list(parent_relation, child_relation, childRTindex, - &appinfo->translated_vars); + make_inh_translation_list(parent_relation, child_relation, child_rti, + &appinfo->translated_vars, appinfo); + +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + /* tablesample is probably null, but copy it */ + child_rte->tablesample = copyObject(parent_rte->tablesample); + + /* + * Construct an alias clause for the child, which we can also use as eref. 
+ * This is important so that EXPLAIN will print the right column aliases + * for child-table columns. (Since ruleutils.c doesn't have any easy way + * to reassociate parent and child columns, we must get the child column + * aliases right to start with. Note that setting childrte->alias forces + * ruleutils.c to use these column names, which it otherwise would not.) + */ + child_tupdesc = RelationGetDescr(child_relation); + parent_colnames = parent_rte->eref->colnames; + child_colnames = NIL; + for (int cattno = 0; cattno < child_tupdesc->natts; cattno++) + { + Form_pg_attribute att = TupleDescAttr(child_tupdesc, cattno); + const char *attname; + + if (att->attisdropped) + { + /* Always insert an empty string for a dropped column */ + attname = ""; + } + else if (appinfo->parent_colnos[cattno] > 0 && + appinfo->parent_colnos[cattno] <= list_length(parent_colnames)) + { + /* Duplicate the query-assigned name for the parent column */ + attname = strVal(list_nth(parent_colnames, + appinfo->parent_colnos[cattno] - 1)); + } + else + { + /* New column, just use its real name */ + attname = NameStr(att->attname); + } + child_colnames = lappend(child_colnames, makeString(pstrdup(attname))); + } + + /* + * We just duplicate the parent's table alias name for each child. If the + * plan gets printed, ruleutils.c has to sort out unique table aliases to + * use, which it can handle. 
+ */ + child_rte->alias = child_rte->eref = makeAlias(parent_rte->eref->aliasname, + child_colnames); +#endif /* Now append 'appinfo' to 'root->append_rel_list' */ root->append_rel_list = lappend(root->append_rel_list, appinfo); + /* And to array in >= 11, it must be big enough */ +#if PG_VERSION_NUM >= 110000 + root->append_rel_array[child_rti] = appinfo; +#endif + + /* Create RelOptInfo for this child (and make some estimates as well) */ + child_rel = build_simple_rel_compat(root, child_rti, parent_rel); + + /* Increase total_table_pages using the 'child_rel' */ + root->total_table_pages += (double) child_rel->pages; + + + /* Create rowmarks required for child rels */ + /* + * XXX: vanilla recurses down with *top* rowmark, not immediate parent one. + * Not sure about example where this matters though. + */ + if (parent_rowmark) + { + child_rowmark = makeNode(PlanRowMark); + + child_rowmark->rti = child_rti; + child_rowmark->prti = parent_rti; + child_rowmark->rowmarkId = parent_rowmark->rowmarkId; + /* Reselect rowmark type, because relkind might not match parent */ + child_rowmark->markType = select_rowmark_type(child_rte, + parent_rowmark->strength); + child_rowmark->allMarkTypes = (1 << child_rowmark->markType); + child_rowmark->strength = parent_rowmark->strength; + child_rowmark->waitPolicy = parent_rowmark->waitPolicy; + child_rowmark->isParent = false; + root->rowMarks = lappend(root->rowMarks, child_rowmark); + + /* Adjust tlist for RowMarks (see planner.c) */ + /* + * XXX Saner approach seems to + * 1) Add tle to top parent and processed_tlist once in rel_pathlist_hook. + * 2) Mark isParent = true + * *parent* knows it is parent, after all; why should child bother? 
+ * 3) Recursion (code executed in childs) starts at 2) + */ + if (!parent_rowmark->isParent && !root->parse->setOperations) + { + append_tle_for_rowmark(root, parent_rowmark); + } + + /* Include child's rowmark type in parent's allMarkTypes */ + parent_rowmark->allMarkTypes |= child_rowmark->allMarkTypes; + parent_rowmark->isParent = true; + } + + +#if PG_VERSION_NUM < 160000 /* for commit a61b1f74823c */ /* Translate column privileges for this child */ if (parent_rte->relid != child_oid) { @@ -399,12 +694,21 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, child_rte->updatedCols = translate_col_privs(parent_rte->updatedCols, appinfo->translated_vars); } +#if PG_VERSION_NUM >= 130000 /* see commit 55a1954d */ + else + { + child_rte->selectedCols = bms_copy(parent_rte->selectedCols); + child_rte->insertedCols = bms_copy(parent_rte->insertedCols); + child_rte->updatedCols = bms_copy(parent_rte->updatedCols); + } +#endif +#endif /* PG_VERSION_NUM < 160000 */ /* Here and below we assume that parent RelOptInfo exists */ - AssertState(parent_rel); + Assert(parent_rel); /* Adjust join quals for this child */ - child_rel->joininfo = (List *) adjust_appendrel_attrs(root, + child_rel->joininfo = (List *) adjust_appendrel_attrs_compat(root, (Node *) parent_rel->joininfo, appinfo); @@ -441,7 +745,7 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, else childquals = get_all_actual_clauses(parent_rel->baserestrictinfo); /* Now it's time to change varnos and rebuld quals */ - childquals = (List *) adjust_appendrel_attrs(root, + childquals = (List *) adjust_appendrel_attrs_compat(root, (Node *) childquals, appinfo); childqual = eval_const_expressions(root, (Node *) @@ -454,7 +758,11 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, * Restriction reduces to constant FALSE or constant NULL after * substitution, so this child need not be scanned. 
*/ +#if PG_VERSION_NUM >= 120000 + mark_dummy_rel(child_rel); +#else set_dummy_rel_pathlist(child_rel); +#endif } childquals = make_ands_implicit((Expr *) childqual); childquals = make_restrictinfos_from_actual_clauses(root, childquals); @@ -468,7 +776,11 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, * This child need not be scanned, so we can omit it from the * appendrel. */ +#if PG_VERSION_NUM >= 120000 + mark_dummy_rel(child_rel); +#else set_dummy_rel_pathlist(child_rel); +#endif } /* @@ -479,35 +791,24 @@ append_child_relation(PlannerInfo *root, Relation parent_relation, add_child_rel_equivalences(root, appinfo, parent_rel, child_rel); child_rel->has_eclass_joins = parent_rel->has_eclass_joins; - /* Close child relations, but keep locks */ - heap_close(child_relation, NoLock); - - - /* Create rowmarks required for child rels */ - parent_rowmark = get_plan_rowmark(root->rowMarks, parent_rti); - if (parent_rowmark) + /* Expand child partition if it might have subpartitions */ + if (parent_rte->relid != child_oid && + child_relation->rd_rel->relhassubclass) { - child_rowmark = makeNode(PlanRowMark); - - child_rowmark->rti = childRTindex; - child_rowmark->prti = parent_rti; - child_rowmark->rowmarkId = parent_rowmark->rowmarkId; - /* Reselect rowmark type, because relkind might not match parent */ - child_rowmark->markType = select_rowmark_type(child_rte, - parent_rowmark->strength); - child_rowmark->allMarkTypes = (1 << child_rowmark->markType); - child_rowmark->strength = parent_rowmark->strength; - child_rowmark->waitPolicy = parent_rowmark->waitPolicy; - child_rowmark->isParent = false; - - root->rowMarks = lappend(root->rowMarks, child_rowmark); - - /* Include child's rowmark type in parent's allMarkTypes */ - parent_rowmark->allMarkTypes |= child_rowmark->allMarkTypes; - parent_rowmark->isParent = true; + /* See XXX above */ + if (child_rowmark) + child_rowmark->isParent = true; + + pathman_rel_pathlist_hook(root, + child_rel, + 
child_rti, + child_rte); } - return childRTindex; + /* Close child relations, but keep locks */ + heap_close_compat(child_relation, NoLock); + + return child_rti; } @@ -529,22 +830,20 @@ select_range_partitions(const Datum value, WrapperNode *result) /* returned partitions */ { bool lossy = false, - is_less, - is_greater; - -#ifdef USE_ASSERT_CHECKING - bool found = false; - int counter = 0; -#endif + miss_left, /* 'value' is less than left bound */ + miss_right; /* 'value' is greater that right bound */ int startidx = 0, endidx = nranges - 1, cmp_min, cmp_max, - i; + i = 0; Bound value_bound = MakeBound(value); /* convert value to Bound */ +#ifdef USE_ASSERT_CHECKING + int counter = 0; +#endif /* Initial value (no missing partitions found) */ result->found_gap = false; @@ -566,9 +865,9 @@ select_range_partitions(const Datum value, cmp_min = cmp_bounds(cmp_func, collid, &value_bound, &ranges[startidx].min); cmp_max = cmp_bounds(cmp_func, collid, &value_bound, &ranges[endidx].max); - if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || - (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || - strategy == BTEqualStrategyNumber))) + if ((cmp_min <= 0 && strategy == BTLessStrategyNumber) || + (cmp_min < 0 && (strategy == BTLessEqualStrategyNumber || + strategy == BTEqualStrategyNumber))) { result->rangeset = NIL; return; @@ -582,7 +881,7 @@ select_range_partitions(const Datum value, return; } - if ((cmp_min < 0 && strategy == BTGreaterStrategyNumber) || + if ((cmp_min < 0 && strategy == BTGreaterStrategyNumber) || (cmp_min <= 0 && strategy == BTGreaterEqualStrategyNumber)) { result->rangeset = list_make1_irange(make_irange(startidx, @@ -615,44 +914,60 @@ select_range_partitions(const Datum value, cmp_min = cmp_bounds(cmp_func, collid, &value_bound, &ranges[i].min); cmp_max = cmp_bounds(cmp_func, collid, &value_bound, &ranges[i].max); - is_less = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); - is_greater = (cmp_max > 0 || (cmp_max >= 0 && 
strategy != BTLessStrategyNumber)); + /* How is 'value' located with respect to left & right bounds? */ + miss_left = (cmp_min < 0 || (cmp_min == 0 && strategy == BTLessStrategyNumber)); + miss_right = (cmp_max > 0 || (cmp_max == 0 && strategy != BTLessStrategyNumber)); - if (!is_less && !is_greater) + /* Searched value is inside of partition */ + if (!miss_left && !miss_right) { - if (strategy == BTGreaterEqualStrategyNumber && cmp_min == 0) + /* 'value' == 'min' and we want everything on the right */ + if (cmp_min == 0 && strategy == BTGreaterEqualStrategyNumber) lossy = false; - else if (strategy == BTLessStrategyNumber && cmp_max == 0) + /* 'value' == 'max' and we want everything on the left */ + else if (cmp_max == 0 && strategy == BTLessStrategyNumber) lossy = false; - else - lossy = true; + /* We're somewhere in the middle */ + else lossy = true; -#ifdef USE_ASSERT_CHECKING - found = true; -#endif - break; + break; /* just exit loop */ } /* Indices have met, looks like there's no partition */ if (startidx >= endidx) { - result->rangeset = NIL; + result->rangeset = NIL; result->found_gap = true; - return; + + /* Return if it's "key = value" */ + if (strategy == BTEqualStrategyNumber) + return; + + /* + * Use current partition 'i' as a pivot that will be + * excluded by relation_excluded_by_constraints() if + * (lossy == true) & its WHERE clauses are trivial. 
+ */ + if ((miss_left && (strategy == BTLessStrategyNumber || + strategy == BTLessEqualStrategyNumber)) || + (miss_right && (strategy == BTGreaterStrategyNumber || + strategy == BTGreaterEqualStrategyNumber))) + lossy = true; + else + lossy = false; + + break; /* just exit loop */ } - if (is_less) + if (miss_left) endidx = i - 1; - else if (is_greater) + else if (miss_right) startidx = i + 1; /* For debug's sake */ Assert(++counter < 100); } - /* Should've been found by now */ - Assert(found); - /* Filter partitions */ switch(strategy) { @@ -681,18 +996,16 @@ select_range_partitions(const Datum value, { result->rangeset = list_make1_irange(make_irange(i, i, IR_LOSSY)); if (i < nranges - 1) - result->rangeset = - lappend_irange(result->rangeset, - make_irange(i + 1, - nranges - 1, - IR_COMPLETE)); + result->rangeset = lappend_irange(result->rangeset, + make_irange(i + 1, + nranges - 1, + IR_COMPLETE)); } else { - result->rangeset = - list_make1_irange(make_irange(i, - nranges - 1, - IR_COMPLETE)); + result->rangeset = list_make1_irange(make_irange(i, + nranges - 1, + IR_COMPLETE)); } break; @@ -900,9 +1213,14 @@ handle_const(const Const *c, } /* Else use the Const's value */ else value = c->constvalue; - - /* Calculate 32-bit hash of 'value' and corresponding index */ - hash = OidFunctionCall1(prel->hash_proc, value); + /* + * Calculate 32-bit hash of 'value' and corresponding index. + * Since 12, hashtext requires valid collation. Since we never + * supported this, passing db default one will do. 
+ */ + hash = OidFunctionCall1Coll(prel->hash_proc, + DEFAULT_COLLATION_OID, + value); idx = hash_to_part_index(DatumGetInt32(hash), PrelChildrenCount(prel)); @@ -969,8 +1287,14 @@ handle_array(ArrayType *array, bool elem_byval; char elem_align; - /* Check if we can work with this strategy */ - if (strategy == 0) + /* + * Check if we can work with this strategy + * We can work only with BTLessStrategyNumber, BTLessEqualStrategyNumber, + * BTEqualStrategyNumber, BTGreaterEqualStrategyNumber and BTGreaterStrategyNumber. + * If new search strategies appear in the future, then access optimizations from + * this function will not work, and the default behavior (handle_array_return:) will work. + */ + if (strategy == InvalidStrategy || strategy > BTGreaterStrategyNumber) goto handle_array_return; /* Get element's properties */ @@ -987,8 +1311,12 @@ handle_array(ArrayType *array, List *ranges; int i; - /* This is only for paranoia's sake */ - Assert(BTMaxStrategyNumber == 5 && BTEqualStrategyNumber == 3); + /* This is only for paranoia's sake (checking correctness of following take_min calculation) */ + Assert(BTEqualStrategyNumber == 3 + && BTLessStrategyNumber < BTEqualStrategyNumber + && BTLessEqualStrategyNumber < BTEqualStrategyNumber + && BTGreaterEqualStrategyNumber > BTEqualStrategyNumber + && BTGreaterStrategyNumber > BTEqualStrategyNumber); /* Optimizations for <, <=, >=, > */ if (strategy != BTEqualStrategyNumber) @@ -1315,37 +1643,35 @@ handle_opexpr(const OpExpr *expr, { Node *param; const PartRelationInfo *prel = context->prel; + Oid opid; /* operator's Oid */ /* Save expression */ result->orig = (const Node *) expr; - if (list_length(expr->args) == 2) + /* Is it KEY OP PARAM or PARAM OP KEY? */ + if (OidIsValid(opid = IsKeyOpParam(expr, context, ¶m))) { - /* Is it KEY OP PARAM or PARAM OP KEY? 
*/ - if (is_key_op_param(expr, context, ¶m)) - { - TypeCacheEntry *tce; - int strategy; + TypeCacheEntry *tce; + int strategy; - tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); - strategy = get_op_opfamily_strategy(expr->opno, tce->btree_opf); + tce = lookup_type_cache(prel->ev_type, TYPECACHE_BTREE_OPFAMILY); + strategy = get_op_opfamily_strategy(opid, tce->btree_opf); - if (IsConstValue(param, context)) - { - handle_const(ExtractConst(param, context), - expr->inputcollid, - strategy, context, result); + if (IsConstValue(param, context)) + { + handle_const(ExtractConst(param, context), + expr->inputcollid, + strategy, context, result); - return; /* done, exit */ - } - /* TODO: estimate selectivity for param if it's Var */ - else if (IsA(param, Param) || IsA(param, Var)) - { - result->rangeset = list_make1_irange_full(prel, IR_LOSSY); - result->paramsel = estimate_paramsel_using_prel(prel, strategy); + return; /* done, exit */ + } + /* TODO: estimate selectivity for param if it's Var */ + else if (IsA(param, Param) || IsA(param, Var)) + { + result->rangeset = list_make1_irange_full(prel, IR_LOSSY); + result->paramsel = estimate_paramsel_using_prel(prel, strategy); - return; /* done, exit */ - } + return; /* done, exit */ } } @@ -1354,35 +1680,6 @@ handle_opexpr(const OpExpr *expr, } -/* - * Checks if expression is a KEY OP PARAM or PARAM OP KEY, where - * KEY is partitioning expression and PARAM is whatever. - * - * NOTE: returns false if partition key is not in expression. 
- */ -static bool -is_key_op_param(const OpExpr *expr, - const WalkerContext *context, - Node **param_ptr) /* ret value #1 */ -{ - Node *left = linitial(expr->args), - *right = lsecond(expr->args); - - if (match_expr_to_operand(context->prel_expr, left)) - { - *param_ptr = right; - return true; - } - - if (match_expr_to_operand(context->prel_expr, right)) - { - *param_ptr = left; - return true; - } - - return false; -} - /* Find Max or Min value of array */ static Datum array_find_min_max(Datum *values, @@ -1724,7 +2021,8 @@ translate_col_privs(const Bitmapset *parent_privs, */ void make_inh_translation_list(Relation oldrelation, Relation newrelation, - Index newvarno, List **translated_vars) + Index newvarno, List **translated_vars, + AppendRelInfo *appinfo) { List *vars = NIL; TupleDesc old_tupdesc = RelationGetDescr(oldrelation); @@ -1732,6 +2030,17 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, int oldnatts = old_tupdesc->natts; int newnatts = new_tupdesc->natts; int old_attno; +#if PG_VERSION_NUM >= 130000 /* see commit ce76c0ba */ + AttrNumber *pcolnos = NULL; + + if (appinfo) + { + /* Initialize reverse-translation array with all entries zero */ + appinfo->num_child_cols = newnatts; + appinfo->parent_colnos = pcolnos = + (AttrNumber *) palloc0(newnatts * sizeof(AttrNumber)); + } +#endif for (old_attno = 0; old_attno < oldnatts; old_attno++) { @@ -1742,7 +2051,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, Oid attcollation; int new_attno; - att = old_tupdesc->attrs[old_attno]; + att = TupleDescAttr(old_tupdesc, old_attno); if (att->attisdropped) { /* Just put NULL into this list entry */ @@ -1766,6 +2075,10 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, atttypmod, attcollation, 0)); +#if PG_VERSION_NUM >= 130000 + if (pcolnos) + pcolnos[old_attno] = old_attno + 1; +#endif continue; } @@ -1780,7 +2093,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, * 
notational device to include the assignment into the if-clause. */ if (old_attno < newnatts && - (att = new_tupdesc->attrs[old_attno]) != NULL && + (att = TupleDescAttr(new_tupdesc, old_attno)) != NULL && !att->attisdropped && att->attinhcount != 0 && strcmp(attname, NameStr(att->attname)) == 0) new_attno = old_attno; @@ -1788,7 +2101,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, { for (new_attno = 0; new_attno < newnatts; new_attno++) { - att = new_tupdesc->attrs[new_attno]; + att = TupleDescAttr(new_tupdesc, new_attno); /* * Make clang analyzer happy: @@ -1823,6 +2136,10 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, atttypmod, attcollation, 0)); +#if PG_VERSION_NUM >= 130000 + if (pcolnos) + pcolnos[new_attno] = old_attno + 1; +#endif } *translated_vars = vars; @@ -1831,6 +2148,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, /* * set_append_rel_pathlist * Build access paths for an "append relation" + * Similar to PG function with the same name. 
* * NOTE: this function is 'public' (used in hooks.c) */ @@ -1859,9 +2177,9 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, foreach(l, root->append_rel_list) { AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l); - Index childRTindex; - RangeTblEntry *childRTE; - RelOptInfo *childrel; + Index child_rti; + RangeTblEntry *child_rte; + RelOptInfo *child_rel; ListCell *lcp; /* append_rel_list contains all append rels; ignore others */ @@ -1869,9 +2187,12 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, continue; /* Re-locate the child RTE and RelOptInfo */ - childRTindex = appinfo->child_relid; - childRTE = root->simple_rte_array[childRTindex]; - childrel = root->simple_rel_array[childRTindex]; + child_rti = appinfo->child_relid; + child_rte = root->simple_rte_array[child_rti]; + child_rel = root->simple_rel_array[child_rti]; + + if (!child_rel) + elog(ERROR, "could not make access paths to a relation"); #if PG_VERSION_NUM >= 90600 /* @@ -1882,44 +2203,48 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, * For consistency, do this before calling set_rel_size() for the child. 
*/ if (root->glob->parallelModeOK && rel->consider_parallel) - set_rel_consider_parallel_compat(root, childrel, childRTE); + set_rel_consider_parallel_compat(root, child_rel, child_rte); #endif - /* Compute child's access paths & sizes */ - if (childRTE->relkind == RELKIND_FOREIGN_TABLE) + /* Build a few paths for this relation */ + if (child_rel->pathlist == NIL) { - /* childrel->rows should be >= 1 */ - set_foreign_size(root, childrel, childRTE); + /* Compute child's access paths & sizes */ + if (child_rte->relkind == RELKIND_FOREIGN_TABLE) + { + /* childrel->rows should be >= 1 */ + set_foreign_size(root, child_rel, child_rte); - /* If child IS dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) - continue; + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(child_rel)) + continue; - set_foreign_pathlist(root, childrel, childRTE); - } - else - { - /* childrel->rows should be >= 1 */ - set_plain_rel_size(root, childrel, childRTE); + set_foreign_pathlist(root, child_rel, child_rte); + } + else + { + /* childrel->rows should be >= 1 */ + set_plain_rel_size(root, child_rel, child_rte); - /* If child IS dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) - continue; + /* If child IS dummy, ignore it */ + if (IS_DUMMY_REL(child_rel)) + continue; - set_plain_rel_pathlist(root, childrel, childRTE); + set_plain_rel_pathlist(root, child_rel, child_rte); + } } /* Set cheapest path for child */ - set_cheapest(childrel); + set_cheapest(child_rel); /* If child BECAME dummy, ignore it */ - if (IS_DUMMY_REL(childrel)) + if (IS_DUMMY_REL(child_rel)) continue; /* * Child is live, so add it to the live_childrels list for use below. 
*/ - live_childrels = lappend(live_childrels, childrel); + live_childrels = lappend(live_childrels, child_rel); #if PG_VERSION_NUM >= 90600 /* @@ -1929,7 +2254,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, * not; but we don't have that today, so it's a waste to consider * partial paths anywhere in the appendrel unless it's all safe. */ - if (!childrel->consider_parallel) + if (!child_rel->consider_parallel) rel->consider_parallel = false; #endif @@ -1938,17 +2263,17 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, * the unparameterized Append path we are constructing for the parent. * If not, there's no workable unparameterized path. */ - if (childrel->cheapest_total_path->param_info == NULL) + if (child_rel->cheapest_total_path->param_info == NULL) subpaths = accumulate_append_subpath(subpaths, - childrel->cheapest_total_path); + child_rel->cheapest_total_path); else subpaths_valid = false; #if PG_VERSION_NUM >= 90600 /* Same idea, but for a partial plan. */ - if (childrel->partial_pathlist != NIL) + if (child_rel->partial_pathlist != NIL) partial_subpaths = accumulate_append_subpath(partial_subpaths, - linitial(childrel->partial_pathlist)); + linitial(child_rel->partial_pathlist)); else partial_subpaths_valid = false; #endif @@ -1959,7 +2284,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, * heuristic to indicate which sort orderings and parameterizations we * should build Append and MergeAppend paths for. 
*/ - foreach(lcp, childrel->pathlist) + foreach(lcp, child_rel->pathlist) { Path *childpath = (Path *) lfirst(lcp); List *childkeys = childpath->pathkeys; diff --git a/src/pl_funcs.c b/src/pl_funcs.c index f2ca6164..75c1c12a 100644 --- a/src/pl_funcs.c +++ b/src/pl_funcs.c @@ -3,7 +3,7 @@ * pl_funcs.c * Utility C functions for stored procedures * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -18,12 +18,17 @@ #include "xact_handling.h" #include "utils.h" -#include "access/tupconvert.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/table.h" +#include "access/tableam.h" +#endif +#include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" -#include "catalog/pg_inherits_fn.h" #include "catalog/pg_type.h" #include "commands/tablespace.h" #include "commands/trigger.h" @@ -39,20 +44,24 @@ #include "utils/syscache.h" #include "utils/typcache.h" +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif + /* Function declarations */ PG_FUNCTION_INFO_V1( get_number_of_partitions_pl ); +PG_FUNCTION_INFO_V1( get_partition_key_type_pl ); +PG_FUNCTION_INFO_V1( get_partition_cooked_key_pl ); +PG_FUNCTION_INFO_V1( get_cached_partition_cooked_key_pl ); PG_FUNCTION_INFO_V1( get_parent_of_partition_pl ); PG_FUNCTION_INFO_V1( get_base_type_pl ); -PG_FUNCTION_INFO_V1( get_partition_key_type ); PG_FUNCTION_INFO_V1( get_tablespace_pl ); PG_FUNCTION_INFO_V1( show_cache_stats_internal ); PG_FUNCTION_INFO_V1( show_partition_list_internal ); -PG_FUNCTION_INFO_V1( build_update_trigger_name ); -PG_FUNCTION_INFO_V1( build_update_trigger_func_name ); PG_FUNCTION_INFO_V1( build_check_constraint_name ); PG_FUNCTION_INFO_V1( validate_relname ); @@ -72,51 +81,34 @@ PG_FUNCTION_INFO_V1( 
invoke_on_partition_created_callback ); PG_FUNCTION_INFO_V1( check_security_policy ); -PG_FUNCTION_INFO_V1( create_update_triggers ); -PG_FUNCTION_INFO_V1( pathman_update_trigger_func ); -PG_FUNCTION_INFO_V1( create_single_update_trigger ); -PG_FUNCTION_INFO_V1( has_update_trigger ); - PG_FUNCTION_INFO_V1( debug_capture ); -PG_FUNCTION_INFO_V1( get_pathman_lib_version ); - +PG_FUNCTION_INFO_V1( pathman_version ); /* User context for function show_partition_list_internal() */ typedef struct { - Relation pathman_config; - HeapScanDesc pathman_config_scan; - Snapshot snapshot; + Relation pathman_config; +#if PG_VERSION_NUM >= 120000 + TableScanDesc pathman_config_scan; +#else + HeapScanDesc pathman_config_scan; +#endif + Snapshot snapshot; - const PartRelationInfo *current_prel; /* selected PartRelationInfo */ + PartRelationInfo *current_prel; /* selected PartRelationInfo */ - Size child_number; /* child we're looking at */ - SPITupleTable *tuptable; /* buffer for tuples */ + Size child_number; /* child we're looking at */ + SPITupleTable *tuptable; /* buffer for tuples */ } show_partition_list_cxt; /* User context for function show_pathman_cache_stats_internal() */ typedef struct { - MemoryContext pathman_contexts[PATHMAN_MCXT_COUNT]; - HTAB *pathman_htables[PATHMAN_MCXT_COUNT]; - int current_item; + MemoryContext pathman_contexts[PATHMAN_MCXT_COUNT]; + HTAB *pathman_htables[PATHMAN_MCXT_COUNT]; + int current_item; } show_cache_stats_cxt; - -static AttrNumber *pathman_update_trigger_build_attr_map(const PartRelationInfo *prel, - Relation child_rel); - -static ExprState *pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, - Relation source_rel, - HeapTuple new_tuple, - Oid *expr_type); - -static void pathman_update_trigger_func_move_tuple(Relation source_rel, - Relation target_rel, - HeapTuple old_tuple, - HeapTuple new_tuple); - - /* * ------------------------ * Various useful getters @@ -124,76 +116,115 @@ static void 
pathman_update_trigger_func_move_tuple(Relation source_rel, */ /* - * Get number of relation's partitions managed by pg_pathman. + * Return parent of a specified partition. */ Datum -get_number_of_partitions_pl(PG_FUNCTION_ARGS) +get_parent_of_partition_pl(PG_FUNCTION_ARGS) { - Oid parent = PG_GETARG_OID(0); - const PartRelationInfo *prel; + Oid partition = PG_GETARG_OID(0), + parent = get_parent_of_partition(partition); - /* If we couldn't find PartRelationInfo, return 0 */ - if ((prel = get_pathman_relation_info(parent)) == NULL) - PG_RETURN_INT32(0); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"%s\" is not a partition", + get_rel_name_or_relid(partition)))); - PG_RETURN_INT32(PrelChildrenCount(prel)); + PG_RETURN_OID(parent); } /* - * Get parent of a specified partition. + * Return partition key type. */ Datum -get_parent_of_partition_pl(PG_FUNCTION_ARGS) +get_partition_key_type_pl(PG_FUNCTION_ARGS) { - Oid partition = PG_GETARG_OID(0); - PartParentSearch parent_search; - Oid parent; + Oid relid = PG_GETARG_OID(0); + Oid typid; + PartRelationInfo *prel; - /* Fetch parent & write down search status */ - parent = get_parent_of_partition(partition, &parent_search); + prel = get_pathman_relation_info(relid); + shout_if_prel_is_invalid(relid, prel, PT_ANY); - /* We MUST be sure :) */ - Assert(parent_search != PPS_NOT_SURE); + typid = prel->ev_type; - /* It must be parent known by pg_pathman */ - if (parent_search == PPS_ENTRY_PART_PARENT) - PG_RETURN_OID(parent); - else - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("\"%s\" is not a partition", - get_rel_name_or_relid(partition)))); + close_pathman_relation_info(prel); - PG_RETURN_NULL(); - } + PG_RETURN_OID(typid); } /* - * Extract basic type of a domain. + * Return cooked partition key. 
*/ Datum -get_base_type_pl(PG_FUNCTION_ARGS) +get_partition_cooked_key_pl(PG_FUNCTION_ARGS) { - PG_RETURN_OID(getBaseType(PG_GETARG_OID(0))); + /* Values extracted from PATHMAN_CONFIG */ + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + + Oid relid = PG_GETARG_OID(0); + char *expr_cstr; + Node *expr; + char *cooked_cstr; + + /* Check that table is registered in PATHMAN_CONFIG */ + if (!pathman_config_contains_relation(relid, values, isnull, NULL, NULL)) + elog(ERROR, "table \"%s\" is not partitioned", + get_rel_name_or_relid(relid)); + + expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); + expr = cook_partitioning_expression(relid, expr_cstr, NULL); + +#if PG_VERSION_NUM >= 170000 /* for commit d20d8fbd3e4d */ + cooked_cstr = nodeToStringWithLocations(expr); +#else + cooked_cstr = nodeToString(expr); +#endif + + pfree(expr_cstr); + pfree(expr); + + PG_RETURN_DATUM(CStringGetTextDatum(cooked_cstr)); } /* - * Return partition key type. + * Return cached cooked partition key. + * + * Used in tests for invalidation. */ Datum -get_partition_key_type(PG_FUNCTION_ARGS) +get_cached_partition_cooked_key_pl(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); - const PartRelationInfo *prel; + Oid relid = PG_GETARG_OID(0); + PartRelationInfo *prel; + Datum res; prel = get_pathman_relation_info(relid); shout_if_prel_is_invalid(relid, prel, PT_ANY); - PG_RETURN_OID(prel->ev_type); +#if PG_VERSION_NUM >= 170000 /* for commit d20d8fbd3e4d */ + res = CStringGetTextDatum(nodeToStringWithLocations(prel->expr)); +#else + res = CStringGetTextDatum(nodeToString(prel->expr)); +#endif + + close_pathman_relation_info(prel); + + PG_RETURN_DATUM(res); } /* - * Return tablespace name of a specified relation. + * Extract basic type of a domain. + */ +Datum +get_base_type_pl(PG_FUNCTION_ARGS) +{ + PG_RETURN_OID(getBaseType(PG_GETARG_OID(0))); +} + +/* + * Return tablespace name of a specified relation which must not be + * natively partitioned. 
*/ Datum get_tablespace_pl(PG_FUNCTION_ARGS) @@ -207,7 +238,7 @@ get_tablespace_pl(PG_FUNCTION_ARGS) /* If tablespace id is InvalidOid then use the default tablespace */ if (!OidIsValid(tablespace_id)) { - tablespace_id = GetDefaultTablespace(get_rel_persistence(relid)); + tablespace_id = GetDefaultTablespaceCompat(get_rel_persistence(relid), false); /* If tablespace is still invalid then use database's default */ if (!OidIsValid(tablespace_id)) @@ -243,24 +274,29 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) funccxt = SRF_FIRSTCALL_INIT(); + if (!TopPathmanContext) + { + elog(ERROR, "pg_pathman's memory contexts are not initialized yet"); + } + old_mcxt = MemoryContextSwitchTo(funccxt->multi_call_memory_ctx); usercxt = (show_cache_stats_cxt *) palloc(sizeof(show_cache_stats_cxt)); usercxt->pathman_contexts[0] = TopPathmanContext; - usercxt->pathman_contexts[1] = PathmanRelationCacheContext; - usercxt->pathman_contexts[2] = PathmanParentCacheContext; - usercxt->pathman_contexts[3] = PathmanBoundCacheContext; + usercxt->pathman_contexts[1] = PathmanParentsCacheContext; + usercxt->pathman_contexts[2] = PathmanStatusCacheContext; + usercxt->pathman_contexts[3] = PathmanBoundsCacheContext; usercxt->pathman_htables[0] = NULL; /* no HTAB for this entry */ - usercxt->pathman_htables[1] = partitioned_rels; - usercxt->pathman_htables[2] = parent_cache; - usercxt->pathman_htables[3] = bound_cache; + usercxt->pathman_htables[1] = parents_cache; + usercxt->pathman_htables[2] = status_cache; + usercxt->pathman_htables[3] = bounds_cache; usercxt->current_item = 0; /* Create tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(Natts_pathman_cache_stats, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_cache_stats, false); TupleDescInitEntry(tupdesc, Anum_pathman_cs_context, "context", TEXTOID, -1, 0); @@ -297,7 +333,7 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) current_htab = usercxt->pathman_htables[usercxt->current_item]; values[Anum_pathman_cs_context - 
1] = - CStringGetTextDatum(simpify_mcxt_name(current_mcxt)); + CStringGetTextDatum(simplify_mcxt_name(current_mcxt)); /* We can't check stats of mcxt prior to 9.6 */ #if PG_VERSION_NUM >= 90600 @@ -342,6 +378,9 @@ show_cache_stats_internal(PG_FUNCTION_ARGS) /* * List all existing partitions and their parents. + * + * In >=13 (bc8393cf277) struct SPITupleTable was changed + * (free removed and numvals added) */ Datum show_partition_list_internal(PG_FUNCTION_ARGS) @@ -364,16 +403,21 @@ show_partition_list_internal(PG_FUNCTION_ARGS) usercxt = (show_partition_list_cxt *) palloc(sizeof(show_partition_list_cxt)); /* Open PATHMAN_CONFIG with latest snapshot available */ - usercxt->pathman_config = heap_open(get_pathman_config_relid(false), + usercxt->pathman_config = heap_open_compat(get_pathman_config_relid(false), AccessShareLock); usercxt->snapshot = RegisterSnapshot(GetLatestSnapshot()); +#if PG_VERSION_NUM >= 120000 + usercxt->pathman_config_scan = table_beginscan(usercxt->pathman_config, + usercxt->snapshot, 0, NULL); +#else usercxt->pathman_config_scan = heap_beginscan(usercxt->pathman_config, usercxt->snapshot, 0, NULL); +#endif usercxt->current_prel = NULL; /* Create tuple descriptor */ - tupdesc = CreateTemplateTupleDesc(Natts_pathman_partition_list, false); + tupdesc = CreateTemplateTupleDescCompat(Natts_pathman_partition_list, false); TupleDescInitEntry(tupdesc, Anum_pathman_pl_parent, "parent", REGCLASSOID, -1, 0); @@ -403,7 +447,12 @@ show_partition_list_internal(PG_FUNCTION_ARGS) tuptable->tuptabcxt = tuptab_mcxt; /* Set up initial allocations */ +#if PG_VERSION_NUM >= 130000 + tuptable->alloced = PART_RELS_SIZE * CHILD_FACTOR; + tuptable->numvals = 0; +#else tuptable->alloced = tuptable->free = PART_RELS_SIZE * CHILD_FACTOR; +#endif tuptable->vals = (HeapTuple *) palloc(tuptable->alloced * sizeof(HeapTuple)); MemoryContextSwitchTo(old_mcxt); @@ -411,10 +460,10 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* Iterate through pathman cache */ for (;;) { 
- const PartRelationInfo *prel; - HeapTuple htup; - Datum values[Natts_pathman_partition_list]; - bool isnull[Natts_pathman_partition_list] = { 0 }; + HeapTuple htup; + Datum values[Natts_pathman_partition_list]; + bool isnull[Natts_pathman_partition_list] = { 0 }; + PartRelationInfo *prel; /* Fetch next PartRelationInfo if needed */ if (usercxt->current_prel == NULL) @@ -450,6 +499,9 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* If we've run out of partitions, switch to the next 'prel' */ if (usercxt->child_number >= PrelChildrenCount(prel)) { + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + usercxt->current_prel = NULL; usercxt->child_number = 0; @@ -486,8 +538,8 @@ show_partition_list_internal(PG_FUNCTION_ARGS) if (!IsInfinite(&re->min)) { Datum rmin = CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->min), - prel->ev_type)); + BoundToCString(&re->min, + prel->ev_type)); values[Anum_pathman_pl_range_min - 1] = rmin; } @@ -497,8 +549,8 @@ show_partition_list_internal(PG_FUNCTION_ARGS) if (!IsInfinite(&re->max)) { Datum rmax = CStringGetTextDatum( - datum_to_cstring(BoundGetValue(&re->max), - prel->ev_type)); + BoundToCString(&re->max, + prel->ev_type)); values[Anum_pathman_pl_range_max - 1] = rmax; } @@ -516,20 +568,34 @@ show_partition_list_internal(PG_FUNCTION_ARGS) /* Form output tuple */ htup = heap_form_tuple(funccxt->tuple_desc, values, isnull); +#if PG_VERSION_NUM >= 130000 + if (tuptable->numvals == tuptable->alloced) +#else if (tuptable->free == 0) +#endif { /* Double the size of the pointer array */ +#if PG_VERSION_NUM >= 130000 + tuptable->alloced += tuptable->alloced; +#else tuptable->free = tuptable->alloced; tuptable->alloced += tuptable->free; +#endif tuptable->vals = (HeapTuple *) repalloc_huge(tuptable->vals, tuptable->alloced * sizeof(HeapTuple)); } +#if PG_VERSION_NUM >= 130000 + /* Add tuple to table and increase 'numvals' */ + tuptable->vals[tuptable->numvals] = htup; + (tuptable->numvals)++; +#else 
/* Add tuple to table and decrement 'free' */ tuptable->vals[tuptable->alloced - tuptable->free] = htup; (tuptable->free)--; +#endif MemoryContextSwitchTo(old_mcxt); @@ -538,9 +604,13 @@ show_partition_list_internal(PG_FUNCTION_ARGS) } /* Clean resources */ +#if PG_VERSION_NUM >= 120000 + table_endscan(usercxt->pathman_config_scan); +#else heap_endscan(usercxt->pathman_config_scan); +#endif UnregisterSnapshot(usercxt->snapshot); - heap_close(usercxt->pathman_config, AccessShareLock); + heap_close_compat(usercxt->pathman_config, AccessShareLock); usercxt->child_number = 0; } @@ -550,7 +620,11 @@ show_partition_list_internal(PG_FUNCTION_ARGS) tuptable = usercxt->tuptable; /* Iterate through used slots */ +#if PG_VERSION_NUM >= 130000 + if (usercxt->child_number < tuptable->numvals) +#else if (usercxt->child_number < (tuptable->alloced - tuptable->free)) +#endif { HeapTuple htup = usercxt->tuptable->vals[usercxt->child_number++]; @@ -610,6 +684,7 @@ validate_expression(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(0)) { relid = PG_GETARG_OID(0); + check_relation_oid(relid); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'relid' should not be NULL"))); @@ -625,7 +700,7 @@ validate_expression(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); @@ -644,85 +719,57 @@ is_date_type(PG_FUNCTION_ARGS) PG_RETURN_BOOL(is_date_type_internal(PG_GETARG_OID(0))); } - +/* + * Bail out with ERROR if rel1 tuple can't be converted to rel2 tuple. 
+ */ Datum is_tuple_convertible(PG_FUNCTION_ARGS) { Relation rel1, rel2; - bool res = true; +#if PG_VERSION_NUM >= 130000 + AttrMap *map; /* we don't actually need it */ +#else + void *map; /* we don't actually need it */ +#endif - rel1 = heap_open(PG_GETARG_OID(0), AccessShareLock); - rel2 = heap_open(PG_GETARG_OID(1), AccessShareLock); + rel1 = heap_open_compat(PG_GETARG_OID(0), AccessShareLock); + rel2 = heap_open_compat(PG_GETARG_OID(1), AccessShareLock); - PG_TRY(); - { - void *map; /* we don't actually need it */ - - /* Try to build a conversion map */ - map = convert_tuples_by_name_map(RelationGetDescr(rel1), - RelationGetDescr(rel2), - ERR_PART_DESC_CONVERT); - /* Now free map */ - pfree(map); - } - PG_CATCH(); - { - res = false; - } - PG_END_TRY(); + /* Try to build a conversion map */ +#if PG_VERSION_NUM >= 160000 /* for commit ad86d159b6ab */ + map = build_attrmap_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2), false); +#elif PG_VERSION_NUM >= 130000 + map = build_attrmap_by_name(RelationGetDescr(rel1), + RelationGetDescr(rel2)); +#else + map = convert_tuples_by_name_map(RelationGetDescr(rel1), + RelationGetDescr(rel2), + ERR_PART_DESC_CONVERT); +#endif + + /* Now free map */ +#if PG_VERSION_NUM >= 130000 + free_attrmap(map); +#else + pfree(map); +#endif - heap_close(rel1, AccessShareLock); - heap_close(rel2, AccessShareLock); + heap_close_compat(rel1, AccessShareLock); + heap_close_compat(rel2, AccessShareLock); - PG_RETURN_BOOL(res); + /* still return true to avoid changing tests */ + PG_RETURN_BOOL(true); } + /* * ------------------------ * Useful string builders * ------------------------ */ -Datum -build_update_trigger_name(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - const char *result; - - /* Check that relation exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%u\" does not exist", relid))); - - result = 
quote_identifier(build_update_trigger_name_internal(relid)); - - PG_RETURN_TEXT_P(cstring_to_text(result)); -} - -Datum -build_update_trigger_func_name(PG_FUNCTION_ARGS) -{ - Oid relid = PG_GETARG_OID(0); - Oid nspid; - const char *result, - *func_name; - - /* Check that relation exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%u\" does not exist", relid))); - - nspid = get_rel_namespace(relid); - - func_name = build_update_trigger_func_name_internal(relid); - result = psprintf("%s.%s", - quote_identifier(get_namespace_name(nspid)), - quote_identifier(func_name)); - - PG_RETURN_TEXT_P(cstring_to_text(result)); -} - Datum build_check_constraint_name(PG_FUNCTION_ARGS) { @@ -737,13 +784,13 @@ build_check_constraint_name(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(quote_identifier(result))); } + /* * ------------------------ * Cache & config updates * ------------------------ */ - /* * Try to add previously partitioned table to PATHMAN_CONFIG. 
*/ @@ -763,13 +810,16 @@ add_to_pathman_config(PG_FUNCTION_ARGS) HeapTuple htup; Oid expr_type; - Datum expr_datum; - PathmanInitState init_state; + volatile PathmanInitState init_state; + + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); if (!PG_ARGISNULL(0)) { relid = PG_GETARG_OID(0); + check_relation_oid(relid); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'parent_relid' should not be NULL"))); @@ -784,7 +834,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) if (!PG_ARGISNULL(1)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("'expression' should not be NULL"))); @@ -828,7 +878,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } /* Parse and check expression */ - expr_datum = cook_partitioning_expression(relid, expression, &expr_type); + cook_partitioning_expression(relid, expression, &expr_type); /* Canonicalize user's expression (trim whitespaces etc) */ expression = canonicalize_partitioning_expression(relid, expression); @@ -856,16 +906,16 @@ add_to_pathman_config(PG_FUNCTION_ARGS) values[Anum_pathman_config_expr - 1] = CStringGetTextDatum(expression); isnull[Anum_pathman_config_expr - 1] = false; - values[Anum_pathman_config_cooked_expr - 1] = expr_datum; - isnull[Anum_pathman_config_cooked_expr - 1] = false; - /* Insert new row into PATHMAN_CONFIG */ - pathman_config = heap_open(get_pathman_config_relid(false), RowExclusiveLock); + pathman_config = heap_open_compat(get_pathman_config_relid(false), RowExclusiveLock); htup = heap_form_tuple(RelationGetDescr(pathman_config), values, isnull); CatalogTupleInsert(pathman_config, htup); - heap_close(pathman_config, RowExclusiveLock); + heap_close_compat(pathman_config, RowExclusiveLock); + + /* Make changes visible */ + CommandCounterIncrement(); /* Update caches only if this relation has children */ if (FCS_FOUND == 
find_inheritance_children_array(relid, NoLock, true, @@ -874,19 +924,17 @@ add_to_pathman_config(PG_FUNCTION_ARGS) { pfree(children); - /* Now try to create a PartRelationInfo */ PG_TRY(); { /* Some flags might change during refresh attempt */ save_pathman_init_state(&init_state); - refresh_pathman_relation_info(relid, - values, - false); /* initialize immediately */ + /* Now try to create a PartRelationInfo */ + has_pathman_relation_info(relid); } PG_CATCH(); { - /* We have to restore all changed flags */ + /* We have to restore changed flags */ restore_pathman_init_state(&init_state); /* Rethrow ERROR */ @@ -902,7 +950,7 @@ add_to_pathman_config(PG_FUNCTION_ARGS) Oid naming_seq; naming_seq_rv = makeRangeVar(get_namespace_name(get_rel_namespace(relid)), - build_sequence_name_internal(relid), + build_sequence_name_relid_internal(relid), -1); naming_seq = RangeVarGetRelid(naming_seq_rv, AccessShareLock, true); @@ -919,10 +967,11 @@ add_to_pathman_config(PG_FUNCTION_ARGS) } } + CacheInvalidateRelcacheByRelid(relid); + PG_RETURN_BOOL(true); } - /* * Invalidate relcache to refresh PartRelationInfo. 
*/ @@ -931,14 +980,16 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) { TriggerData *trigdata = (TriggerData *) fcinfo->context; Oid pathman_config_params; + Oid pathman_config; Oid partrel; Datum partrel_datum; bool partrel_isnull; /* Fetch Oid of PATHMAN_CONFIG_PARAMS */ pathman_config_params = get_pathman_config_params_relid(true); + pathman_config = get_pathman_config_relid(true); - /* Handle "pg_pathman.enabled = t" case */ + /* Handle "pg_pathman.enabled = f" case */ if (!OidIsValid(pathman_config_params)) goto pathman_config_params_trigger_func_return; @@ -952,12 +1003,17 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) trigdata->tg_trigger->tgname); /* Handle wrong relation */ - if (RelationGetRelid(trigdata->tg_relation) != pathman_config_params) - elog(ERROR, "%s: must be fired for relation \"%s\"", + if (RelationGetRelid(trigdata->tg_relation) != pathman_config_params && + RelationGetRelid(trigdata->tg_relation) != pathman_config) + elog(ERROR, "%s: must be fired for relation \"%s\" or \"%s\"", trigdata->tg_trigger->tgname, - get_rel_name(pathman_config_params)); + get_rel_name(pathman_config_params), + get_rel_name(pathman_config)); - /* Extract partitioned relation's Oid */ + /* + * Extract partitioned relation's Oid. + * Hacky: 1 is attrnum of relid for both pathman_config and pathman_config_params + */ partrel_datum = heap_getattr(trigdata->tg_trigtuple, Anum_pathman_config_params_partrel, RelationGetDescr(trigdata->tg_relation), @@ -987,12 +1043,14 @@ pathman_config_params_trigger_func(PG_FUNCTION_ARGS) */ /* - * Acquire appropriate lock on a partitioned relation. + * Prevent concurrent modifiction of partitioning schema. 
*/ Datum prevent_part_modification(PG_FUNCTION_ARGS) { - Oid relid = PG_GETARG_OID(0); + Oid relid = PG_GETARG_OID(0); + + check_relation_oid(relid); /* Lock partitioned relation till transaction's end */ LockRelationOid(relid, ShareUpdateExclusiveLock); @@ -1006,7 +1064,21 @@ prevent_part_modification(PG_FUNCTION_ARGS) Datum prevent_data_modification(PG_FUNCTION_ARGS) { - prevent_data_modification_internal(PG_GETARG_OID(0)); + Oid relid = PG_GETARG_OID(0); + + check_relation_oid(relid); + + /* + * Check that isolation level is READ COMMITTED. + * Else we won't be able to see new rows + * which could slip through locks. + */ + if (!xact_is_level_read_committed()) + ereport(ERROR, + (errmsg("Cannot perform blocking partitioning operation"), + errdetail("Expected READ COMMITTED isolation level"))); + + LockRelationOid(relid, AccessExclusiveLock); PG_RETURN_VOID(); } @@ -1151,7 +1223,7 @@ is_operator_supported(PG_FUNCTION_ARGS) { Oid opid, typid = PG_GETARG_OID(0); - char *opname = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + char *opname = TextDatumGetCString(PG_GETARG_DATUM(1)); opid = compatible_oper_opid(list_make1(makeString(opname)), typid, typid, true); @@ -1160,420 +1232,6 @@ is_operator_supported(PG_FUNCTION_ARGS) } -/* - * -------------------------- - * Update trigger machinery - * -------------------------- - */ - -/* Behold: the update trigger itself */ -Datum -pathman_update_trigger_func(PG_FUNCTION_ARGS) -{ - TriggerData *trigdata = (TriggerData *) fcinfo->context; - - Relation source_rel; - - Oid parent_relid, - source_relid, - target_relid; - - HeapTuple old_tuple, - new_tuple; - - Datum value; - Oid value_type; - bool isnull; - - Oid *parts; - int nparts; - - ExprContext *econtext; - ExprState *expr_state; - MemoryContext old_mcxt; - PartParentSearch parent_search; - const PartRelationInfo *prel; - - /* Handle user calls */ - if (!CALLED_AS_TRIGGER(fcinfo)) - elog(ERROR, "this function should not be called directly"); - - /* Handle wrong fire mode */ - 
if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) - elog(ERROR, "%s: must be fired for row", - trigdata->tg_trigger->tgname); - - /* Make sure that trigger was fired during UPDATE command */ - if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event)) - elog(ERROR, "this function should only be used as UPDATE trigger"); - - /* Get source relation and its Oid */ - source_rel = trigdata->tg_relation; - source_relid = RelationGetRelid(trigdata->tg_relation); - - /* Fetch old & new tuples */ - old_tuple = trigdata->tg_trigtuple; - new_tuple = trigdata->tg_newtuple; - - /* Find parent relation and partitioning info */ - parent_relid = get_parent_of_partition(source_relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) - elog(ERROR, "relation \"%s\" is not a partition", - RelationGetRelationName(source_rel)); - - /* Fetch partition dispatch info */ - prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_ANY); - - /* Execute partitioning expression */ - econtext = CreateStandaloneExprContext(); - old_mcxt = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); - expr_state = pathman_update_trigger_build_expr_state(prel, - source_rel, - new_tuple, - &value_type); - value = ExecEvalExprCompat(expr_state, econtext, &isnull, - mult_result_handler); - MemoryContextSwitchTo(old_mcxt); - - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); - - /* Search for matching partitions */ - parts = find_partitions_for_value(value, value_type, prel, &nparts); - - /* We can free expression context now */ - FreeExprContext(econtext, false); - - if (nparts > 1) - elog(ERROR, ERR_PART_ATTR_MULTIPLE); - else if (nparts == 0) - { - target_relid = create_partitions_for_value(parent_relid, - value, value_type); - - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent_relid, NULL); - } - else target_relid = parts[0]; - - pfree(parts); - - /* Convert tuple if target partition has changed */ - if 
(target_relid != source_relid) - { - Relation target_rel; - LOCKMODE lockmode = RowExclusiveLock; /* UPDATE */ - - /* Lock partition and check if it exists */ - LockRelationOid(target_relid, lockmode); - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(target_relid))) - elog(ERROR, ERR_PART_ATTR_NO_PART, datum_to_cstring(value, value_type)); - - /* Open partition */ - target_rel = heap_open(target_relid, lockmode); - - /* Move tuple from source relation to the selected partition */ - pathman_update_trigger_func_move_tuple(source_rel, target_rel, - old_tuple, new_tuple); - - /* Close partition */ - heap_close(target_rel, lockmode); - - /* We've made some changes */ - PG_RETURN_VOID(); - } - - /* Just return NEW tuple */ - PG_RETURN_POINTER(new_tuple); -} - -struct replace_vars_cxt -{ - HeapTuple new_tuple; - TupleDesc tuple_desc; - AttrNumber *attributes_map; -}; - -/* Replace Vars with values from 'new_tuple' (Consts) */ -static Node * -replace_vars_with_consts(Node *node, struct replace_vars_cxt *ctx) -{ - const TypeCacheEntry *typcache; - - if (IsA(node, Var)) - { - Var *var = (Var *) node; - AttrNumber varattno = ctx->attributes_map[var->varattno - 1]; - Oid vartype; - Const *new_const = makeNode(Const); - HeapTuple htup; - - Assert(var->varno == PART_EXPR_VARNO); - if (varattno == 0) - elog(ERROR, ERR_PART_DESC_CONVERT); - - /* we suppose that type can be different from parent */ - vartype = ctx->tuple_desc->attrs[varattno - 1]->atttypid; - - htup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(vartype)); - if (HeapTupleIsValid(htup)) - { - Form_pg_type typtup = (Form_pg_type) GETSTRUCT(htup); - new_const->consttypmod = typtup->typtypmod; - new_const->constcollid = typtup->typcollation; - ReleaseSysCache(htup); - } - else elog(ERROR, "cache lookup failed for type %u", vartype); - - typcache = lookup_type_cache(vartype, 0); - new_const->constbyval = typcache->typbyval; - new_const->constlen = typcache->typlen; - new_const->consttype = vartype; - 
new_const->location = -1; - - /* extract value from NEW tuple */ - new_const->constvalue = heap_getattr(ctx->new_tuple, - varattno, - ctx->tuple_desc, - &new_const->constisnull); - return (Node *) new_const; - } - - return expression_tree_mutator(node, replace_vars_with_consts, (void *) ctx); -} - -/* - * Get attributes map between parent and child relation. - * This is simplified version of functions that return TupleConversionMap. - * And it should be faster if expression uses not all fields from relation. - */ -static AttrNumber * -pathman_update_trigger_build_attr_map(const PartRelationInfo *prel, - Relation child_rel) -{ - AttrNumber i = -1; - Oid parent_relid = PrelParentRelid(prel); - TupleDesc child_descr = RelationGetDescr(child_rel); - int natts = child_descr->natts; - AttrNumber *result = (AttrNumber *) palloc0(natts * sizeof(AttrNumber)); - - while ((i = bms_next_member(prel->expr_atts, i)) >= 0) - { - int j; - AttrNumber attnum = i + FirstLowInvalidHeapAttributeNumber; - char *attname = get_attname(parent_relid, attnum); - - for (j = 0; j < natts; j++) - { - Form_pg_attribute att = child_descr->attrs[j]; - - if (att->attisdropped) - continue; /* attrMap[attnum - 1] is already 0 */ - - if (strcmp(NameStr(att->attname), attname) == 0) - { - result[attnum - 1] = (AttrNumber) (j + 1); - break; - } - } - - if (result[attnum - 1] == 0) - elog(ERROR, "Couldn't find '%s' column in child relation", attname); - } - - return result; -} - -static ExprState * -pathman_update_trigger_build_expr_state(const PartRelationInfo *prel, - Relation source_rel, - HeapTuple new_tuple, - Oid *expr_type) /* ret value #1 */ -{ - struct replace_vars_cxt ctx; - Node *expr; - ExprState *expr_state; - - ctx.new_tuple = new_tuple; - ctx.attributes_map = pathman_update_trigger_build_attr_map(prel, source_rel); - ctx.tuple_desc = RelationGetDescr(source_rel); - - expr = replace_vars_with_consts(prel->expr, &ctx); - expr_state = ExecInitExpr((Expr *) expr, NULL); - - 
AssertArg(expr_type); - *expr_type = exprType(expr); - - return expr_state; -} - - -/* Move tuple to new partition (delete 'old_tuple' + insert 'new_tuple') */ -static void -pathman_update_trigger_func_move_tuple(Relation source_rel, - Relation target_rel, - HeapTuple old_tuple, - HeapTuple new_tuple) -{ - TupleDesc source_tupdesc, - target_tupdesc; - HeapTuple target_tuple; - TupleConversionMap *conversion_map; - - /* HACK: use fake 'tdtypeid' in order to fool convert_tuples_by_name() */ - source_tupdesc = CreateTupleDescCopy(RelationGetDescr(source_rel)); - source_tupdesc->tdtypeid = InvalidOid; - - target_tupdesc = CreateTupleDescCopy(RelationGetDescr(target_rel)); - target_tupdesc->tdtypeid = InvalidOid; - - /* Build tuple conversion map */ - conversion_map = convert_tuples_by_name(source_tupdesc, - target_tupdesc, - ERR_PART_DESC_CONVERT); - - if (conversion_map) - { - /* Convert tuple */ - target_tuple = do_convert_tuple(new_tuple, conversion_map); - - /* Free tuple conversion map */ - free_conversion_map(conversion_map); - } - else target_tuple = new_tuple; - - /* Connect using SPI and execute a few queries */ - if (SPI_connect() == SPI_OK_CONNECT) - { - int nvalues = RelationGetDescr(target_rel)->natts; - Oid *types = palloc(nvalues * sizeof(Oid)); - Datum *values = palloc(nvalues * sizeof(Datum)); - char *nulls = palloc(nvalues * sizeof(char)); - StringInfo query = makeStringInfo(); - int i; - - /* Prepare query string */ - appendStringInfo(query, "DELETE FROM %s.%s WHERE ctid = $1", - quote_identifier(get_namespace_name( - RelationGetNamespace(source_rel))), - quote_identifier(RelationGetRelationName(source_rel))); - - /* Build singe argument */ - types[0] = TIDOID; - values[0] = PointerGetDatum(&old_tuple->t_self); - nulls[0] = ' '; - - /* DELETE FROM source_rel WHERE ctid = $1 */ - SPI_execute_with_args(query->data, 1, types, values, nulls, false, 0); - - resetStringInfo(query); - - /* Prepare query string */ - appendStringInfo(query, "INSERT INTO %s.%s 
VALUES (", - quote_identifier(get_namespace_name( - RelationGetNamespace(target_rel))), - quote_identifier(RelationGetRelationName(target_rel))); - for (i = 0; i < target_tupdesc->natts; i++) - { - AttrNumber attnum = i + 1; - bool isnull; - - /* Build singe argument */ - types[i] = target_tupdesc->attrs[i]->atttypid; - values[i] = heap_getattr(target_tuple, attnum, target_tupdesc, &isnull); - nulls[i] = isnull ? 'n' : ' '; - - /* Append "$N [,]" */ - appendStringInfo(query, (i != 0 ? ", $%i" : "$%i"), attnum); - } - appendStringInfoChar(query, ')'); - - /* INSERT INTO target_rel VALUES($1, $2, $3 ...) */ - SPI_execute_with_args(query->data, nvalues, types, values, nulls, false, 0); - - /* Finally close SPI connection */ - SPI_finish(); - } - /* Else emit error */ - else elog(ERROR, "could not connect using SPI"); - - /* At last, free these temporary tuple descs */ - FreeTupleDesc(source_tupdesc); - FreeTupleDesc(target_tupdesc); -} - -/* Create UPDATE triggers for all partitions */ -Datum -create_update_triggers(PG_FUNCTION_ARGS) -{ - Oid parent = PG_GETARG_OID(0); - Oid *children; - const char *trigname; - const PartRelationInfo *prel; - uint32 i; - List *columns; - - /* Check that table is partitioned */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_ANY); - - /* Acquire trigger and attribute names */ - trigname = build_update_trigger_name_internal(parent); - - /* Create trigger for parent */ - columns = PrelExpressionColumnNames(prel); - create_single_update_trigger_internal(parent, trigname, columns); - - /* Fetch children array */ - children = PrelGetChildrenArray(prel); - - /* Create triggers for each partition */ - for (i = 0; i < PrelChildrenCount(prel); i++) - create_single_update_trigger_internal(children[i], trigname, columns); - - PG_RETURN_VOID(); -} - -/* Create an UPDATE trigger for partition */ -Datum -create_single_update_trigger(PG_FUNCTION_ARGS) -{ - Oid parent = PG_GETARG_OID(0); - Oid child = 
PG_GETARG_OID(1); - const char *trigname; - const PartRelationInfo *prel; - List *columns; - - /* Check that table is partitioned */ - prel = get_pathman_relation_info(parent); - shout_if_prel_is_invalid(parent, prel, PT_ANY); - - /* Acquire trigger and attribute names */ - trigname = build_update_trigger_name_internal(parent); - - /* Generate list of columns used in expression */ - columns = PrelExpressionColumnNames(prel); - create_single_update_trigger_internal(child, trigname, columns); - - PG_RETURN_VOID(); -} - -/* Check if relation has pg_pathman's update trigger */ -Datum -has_update_trigger(PG_FUNCTION_ARGS) -{ - Oid parent_relid = PG_GETARG_OID(0); - - /* Check that relation exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%u\" does not exist", parent_relid))); - - PG_RETURN_BOOL(has_update_trigger_internal(parent_relid)); -} - - /* * ------- * DEBUG @@ -1593,9 +1251,9 @@ debug_capture(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } -/* NOTE: just in case */ +/* Return pg_pathman's shared library version */ Datum -get_pathman_lib_version(PG_FUNCTION_ARGS) +pathman_version(PG_FUNCTION_ARGS) { - PG_RETURN_CSTRING(psprintf("%x", CURRENT_LIB_VERSION)); + PG_RETURN_CSTRING(CURRENT_LIB_VERSION); } diff --git a/src/pl_hash_funcs.c b/src/pl_hash_funcs.c index 4f4238f5..4b08c324 100644 --- a/src/pl_hash_funcs.c +++ b/src/pl_hash_funcs.c @@ -54,7 +54,7 @@ create_hash_partitions_internal(PG_FUNCTION_ARGS) RangeVar **rangevars = NULL; /* Check that there's no partitions yet */ - if (get_pathman_relation_info(parent_relid)) + if (has_pathman_relation_info(parent_relid)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot add new HASH partitions"))); @@ -119,9 +119,10 @@ Datum build_hash_condition(PG_FUNCTION_ARGS) { Oid expr_type = PG_GETARG_OID(0); - char *expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + char *expr_cstr = 
TextDatumGetCString(PG_GETARG_DATUM(1)); uint32 part_count = PG_GETARG_UINT32(2), part_idx = PG_GETARG_UINT32(3); + char *pathman_schema; TypeCacheEntry *tce; @@ -141,9 +142,13 @@ build_hash_condition(PG_FUNCTION_ARGS) errmsg("no hash function for type %s", format_type_be(expr_type)))); + pathman_schema = get_namespace_name(get_pathman_schema()); + if (pathman_schema == NULL) + elog(ERROR, "pg_pathman schema not initialized"); + /* Create hash condition CSTRING */ result = psprintf("%s.get_hash_part_idx(%s(%s), %u) = %u", - get_namespace_name(get_pathman_schema()), + pathman_schema, get_func_name(tce->hash_proc), expr_cstr, part_count, diff --git a/src/pl_range_funcs.c b/src/pl_range_funcs.c index 91452ba9..19292a0a 100644 --- a/src/pl_range_funcs.c +++ b/src/pl_range_funcs.c @@ -3,7 +3,7 @@ * pl_range_funcs.c * Utility C functions for stored RANGE procedures * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -15,21 +15,33 @@ #include "utils.h" #include "xact_handling.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif +#include "access/transam.h" #include "access/xact.h" +#include "catalog/heap.h" #include "catalog/namespace.h" #include "catalog/pg_type.h" -#include "catalog/heap.h" #include "commands/tablecmds.h" #include "executor/spi.h" #include "nodes/nodeFuncs.h" #include "parser/parse_relation.h" #include "parser/parse_expr.h" #include "utils/array.h" +#if PG_VERSION_NUM >= 120000 +#include "utils/float.h" +#endif #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/numeric.h" #include "utils/ruleutils.h" #include "utils/syscache.h" +#include "utils/snapmgr.h" + +#if PG_VERSION_NUM < 110000 +#include "catalog/pg_inherits_fn.h" +#endif #if PG_VERSION_NUM >= 100000 #include "utils/regproc.h" @@ -44,35 +56,33 @@ PG_FUNCTION_INFO_V1( create_single_range_partition_pl ); PG_FUNCTION_INFO_V1( 
create_range_partitions_internal ); PG_FUNCTION_INFO_V1( check_range_available_pl ); PG_FUNCTION_INFO_V1( generate_range_bounds_pl ); +PG_FUNCTION_INFO_V1( validate_interval_value ); +PG_FUNCTION_INFO_V1( split_range_partition ); +PG_FUNCTION_INFO_V1( merge_range_partitions ); +PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); PG_FUNCTION_INFO_V1( get_part_range_by_oid ); PG_FUNCTION_INFO_V1( get_part_range_by_idx ); PG_FUNCTION_INFO_V1( build_range_condition ); PG_FUNCTION_INFO_V1( build_sequence_name ); -PG_FUNCTION_INFO_V1( merge_range_partitions ); -PG_FUNCTION_INFO_V1( drop_range_partition_expand_next ); -PG_FUNCTION_INFO_V1( validate_interval_value ); +static ArrayType *construct_bounds_array(Bound *elems, + int nelems, + Oid elmtype, + int elmlen, + bool elmbyval, + char elmalign); + static char *deparse_constraint(Oid relid, Node *expr); -static ArrayType *construct_infinitable_array(Bound *elems, - int nelems, - Oid elmtype, - int elmlen, - bool elmbyval, - char elmalign); -static void check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges); -static void merge_range_partitions_internal(Oid parent, - Oid *parts, - uint32 nparts); + static void modify_range_constraint(Oid partition_relid, const char *expression, Oid expression_type, const Bound *lower, const Bound *upper); -static char *get_qualified_rel_name(Oid relid); -static void drop_table_by_oid(Oid relid); + static bool interval_is_trivial(Oid atttype, Datum interval, Oid interval_type); @@ -146,7 +156,7 @@ create_single_range_partition_pl(PG_FUNCTION_ARGS) /* Fetch 'tablespace' */ if (!PG_ARGISNULL(4)) { - tablespace = TextDatumGetCString(PG_GETARG_TEXT_P(4)); + tablespace = TextDatumGetCString(PG_GETARG_DATUM(4)); } else tablespace = NULL; /* default */ @@ -385,224 +395,240 @@ generate_range_bounds_pl(PG_FUNCTION_ARGS) PG_RETURN_ARRAYTYPE_P(array); } - /* - * ------------------------ - * Various useful getters - * ------------------------ - */ - -/* - * Returns range entry (min, max) 
(in form of array). - * - * arg #1 is the parent's Oid. - * arg #2 is the partition's Oid. + * Takes text representation of interval value and checks + * if it corresponds to partitioning expression. + * NOTE: throws an ERROR if it fails to convert text to Datum. */ Datum -get_part_range_by_oid(PG_FUNCTION_ARGS) +validate_interval_value(PG_FUNCTION_ARGS) { - Oid partition_relid, - parent_relid; - PartParentSearch parent_search; - RangeEntry *ranges; - const PartRelationInfo *prel; - uint32 i; +#define ARG_PARTREL 0 +#define ARG_EXPRESSION 1 +#define ARG_PARTTYPE 2 +#define ARG_RANGE_INTERVAL 3 - if (!PG_ARGISNULL(0)) - { - partition_relid = PG_GETARG_OID(0); - } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partition_relid' should not be NULL"))); + Oid partrel; + PartType parttype; + char *expr_cstr; + Oid expr_type; - parent_relid = get_parent_of_partition(partition_relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + if (PG_ARGISNULL(ARG_PARTREL)) + { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%s\" is not a partition", - get_rel_name_or_relid(partition_relid)))); + errmsg("'partrel' should not be NULL"))); + } + else partrel = PG_GETARG_OID(ARG_PARTREL); - prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + /* Check that relation exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) + elog(ERROR, "relation \"%u\" does not exist", partrel); - /* Check type of 'dummy' (for correct output) */ - if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 1)) != getBaseType(prel->ev_type)) + if (PG_ARGISNULL(ARG_EXPRESSION)) + { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("pg_typeof(dummy) should be %s", - format_type_be(getBaseType(prel->ev_type))))); + errmsg("'expression' should not be NULL"))); + } + else expr_cstr = TextDatumGetCString(PG_GETARG_DATUM(ARG_EXPRESSION)); - ranges = 
PrelGetRangesArray(prel); + if (PG_ARGISNULL(ARG_PARTTYPE)) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parttype' should not be NULL"))); + } + else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); - /* Look for the specified partition */ - for (i = 0; i < PrelChildrenCount(prel); i++) - if (ranges[i].child_oid == partition_relid) - { - ArrayType *arr; - Bound elems[2]; + /* + * Try to parse partitioning expression, could fail with ERROR. + */ + cook_partitioning_expression(partrel, expr_cstr, &expr_type); - elems[0] = ranges[i].min; - elems[1] = ranges[i].max; + /* + * NULL interval is fine for both HASH and RANGE. + * But for RANGE we need to make some additional checks. + */ + if (!PG_ARGISNULL(ARG_RANGE_INTERVAL)) + { + Datum interval_text = PG_GETARG_DATUM(ARG_RANGE_INTERVAL), + interval_value; + Oid interval_type; - arr = construct_infinitable_array(elems, 2, - prel->ev_type, prel->ev_len, - prel->ev_byval, prel->ev_align); + if (parttype == PT_HASH) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should be NULL for HASH partitioned table"))); - PG_RETURN_ARRAYTYPE_P(arr); - } + /* Try converting textual representation */ + interval_value = extract_binary_interval_from_text(interval_text, + expr_type, + &interval_type); - /* No partition found, report error */ - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("relation \"%s\" has no partition \"%s\"", - get_rel_name_or_relid(parent_relid), - get_rel_name_or_relid(partition_relid)))); + /* Check that interval isn't trivial */ + if (interval_is_trivial(expr_type, interval_value, interval_type)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("interval should not be trivial"))); + } - PG_RETURN_NULL(); /* keep compiler happy */ + PG_RETURN_BOOL(true); } -/* - * Returns N-th range entry (min, max) (in form of array). - * - * arg #1 is the parent's Oid. 
- * arg #2 is the index of the range - * (if it is negative then the last range will be returned). - */ Datum -get_part_range_by_idx(PG_FUNCTION_ARGS) +split_range_partition(PG_FUNCTION_ARGS) { - Oid parent_relid; - int partition_idx = 0; - Bound elems[2]; - RangeEntry *ranges; - const PartRelationInfo *prel; + Oid parent = InvalidOid, + partition1, + partition2; + RangeVar *part_name = NULL; + char *tablespace_name = NULL; + + Datum pivot_value; + Oid pivot_type; + + PartRelationInfo *prel; + Bound min_bound, + max_bound, + split_bound; + + Snapshot fresh_snapshot; + FmgrInfo finfo; + SPIPlanPtr plan; + char *query; + int i; if (!PG_ARGISNULL(0)) { - parent_relid = PG_GETARG_OID(0); + partition1 = PG_GETARG_OID(0); + check_relation_oid(partition1); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'parent_relid' should not be NULL"))); + errmsg("'partition1' should not be NULL"))); if (!PG_ARGISNULL(1)) { - partition_idx = PG_GETARG_INT32(1); + pivot_value = PG_GETARG_DATUM(1); + pivot_type = get_fn_expr_argtype(fcinfo->flinfo, 1); } else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partition_idx' should not be NULL"))); + errmsg("'split_value' should not be NULL"))); - prel = get_pathman_relation_info(parent_relid); - shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + LockRelationOid(partition1, ExclusiveLock); - /* Check type of 'dummy' (for correct output) */ - if (getBaseType(get_fn_expr_argtype(fcinfo->flinfo, 2)) != getBaseType(prel->ev_type)) + /* Get parent of partition */ + parent = get_parent_of_partition(partition1); + if (!OidIsValid(parent)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("pg_typeof(dummy) should be %s", - format_type_be(getBaseType(prel->ev_type))))); - + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition1)))); - /* Now we have to deal with 'idx' */ - if (partition_idx < -1) - { - ereport(ERROR, 
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("negative indices other than -1" - " (last partition) are not allowed"))); - } - else if (partition_idx == -1) - { - partition_idx = PrelLastChild(prel); - } - else if (((uint32) abs(partition_idx)) >= PrelChildrenCount(prel)) - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("partition #%d does not exist (total amount is %u)", - partition_idx, PrelChildrenCount(prel)))); - } + /* This partition should not have children */ + if (has_pathman_relation_info(partition1)) + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot split partition that has children"))); - ranges = PrelGetRangesArray(prel); + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); - /* Build args for construct_infinitable_array() */ - elems[0] = ranges[partition_idx].min; - elems[1] = ranges[partition_idx].max; + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent); + shout_if_prel_is_invalid(parent, prel, PT_RANGE); - PG_RETURN_ARRAYTYPE_P(construct_infinitable_array(elems, 2, - prel->ev_type, - prel->ev_len, - prel->ev_byval, - prel->ev_align)); -} + i = PrelHasPartition(prel, partition1) - 1; + Assert(i >= 0 && i < PrelChildrenCount(prel)); + min_bound = PrelGetRangesArray(prel)[i].min; + max_bound = PrelGetRangesArray(prel)[i].max; -/* - * ------------------------ - * Useful string builders - * ------------------------ - */ + split_bound = MakeBound(perform_type_cast(pivot_value, + getBaseType(pivot_type), + getBaseType(prel->ev_type), + NULL)); -/* Build range condition for a CHECK CONSTRAINT. 
*/ -Datum -build_range_condition(PG_FUNCTION_ARGS) -{ - Oid partition_relid; - char *expression; - Node *expr; + fmgr_info(prel->cmp_proc, &finfo); - Bound min, - max; - Oid bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 2); - Constraint *con; - char *result; + /* Validate pivot's value */ + if (cmp_bounds(&finfo, prel->ev_collid, &split_bound, &min_bound) <= 0 || + cmp_bounds(&finfo, prel->ev_collid, &split_bound, &max_bound) >= 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("specified value does not fit into the range (%s, %s)", + BoundToCString(&min_bound, prel->ev_type), + BoundToCString(&max_bound, prel->ev_type)))); + } - if (!PG_ARGISNULL(0)) + if (!PG_ARGISNULL(2)) { - partition_relid = PG_GETARG_OID(0); + part_name = makeRangeVar(get_namespace_name(get_rel_namespace(parent)), + TextDatumGetCString(PG_GETARG_DATUM(2)), + 0); } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partition_relid' should not be NULL"))); - if (!PG_ARGISNULL(1)) + if (!PG_ARGISNULL(3)) { - expression = TextDatumGetCString(PG_GETARG_TEXT_P(1)); + tablespace_name = TextDatumGetCString(PG_GETARG_DATUM(3)); } - else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'expression' should not be NULL")));; - min = PG_ARGISNULL(2) ? - MakeBoundInf(MINUS_INFINITY) : - MakeBound(PG_GETARG_DATUM(2)); + /* Create a new partition */ + partition2 = create_single_range_partition_internal(parent, + &split_bound, + &max_bound, + prel->ev_type, + part_name, + tablespace_name); - max = PG_ARGISNULL(3) ? 
- MakeBoundInf(PLUS_INFINITY) : - MakeBound(PG_GETARG_DATUM(3)); + /* Make constraint visible */ + CommandCounterIncrement(); - expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); - con = build_range_check_constraint(partition_relid, - expr, - &min, &max, - bounds_type); + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect using SPI"); - result = deparse_constraint(partition_relid, con->raw_expr); + /* + * Get latest snapshot to see data that might have been + * added to partitions before this transaction has started, + * but was committed a moment before we acquired the locks. + */ + fresh_snapshot = RegisterSnapshot(GetLatestSnapshot()); - PG_RETURN_TEXT_P(cstring_to_text(result)); -} + query = psprintf("WITH part_data AS ( " + "DELETE FROM %1$s WHERE (%3$s) >= $1 RETURNING " + "*) " + "INSERT INTO %2$s SELECT * FROM part_data", + get_qualified_rel_name(partition1), + get_qualified_rel_name(partition2), + prel->expr_cstr); -/* Build name for sequence for auto partition naming */ -Datum -build_sequence_name(PG_FUNCTION_ARGS) -{ - Oid parent_relid = PG_GETARG_OID(0); - Oid parent_nsp; - char *result; + plan = SPI_prepare(query, 1, &prel->ev_type); - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) - ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); + if (!plan) + elog(ERROR, "%s: SPI_prepare returned %d", + __FUNCTION__, SPI_result); - parent_nsp = get_rel_namespace(parent_relid); + SPI_execute_snapshot(plan, + &split_bound.value, NULL, + fresh_snapshot, + InvalidSnapshot, + false, true, 0); - result = psprintf("%s.%s", - quote_identifier(get_namespace_name(parent_nsp)), - quote_identifier(build_sequence_name_internal(parent_relid))); + /* Free snapshot */ + UnregisterSnapshot(fresh_snapshot); - PG_RETURN_TEXT_P(cstring_to_text(result)); -} + SPI_finish(); + + /* Drop old constraint and create a new one */ + modify_range_constraint(partition1, + prel->expr_cstr, + 
prel->ev_type, + &min_bound, + &split_bound); + + /* Make constraint visible */ + CommandCounterIncrement(); + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_OID(partition2); +} /* * Merge multiple partitions. @@ -612,17 +638,26 @@ build_sequence_name(PG_FUNCTION_ARGS) Datum merge_range_partitions(PG_FUNCTION_ARGS) { - Oid parent = InvalidOid; - PartParentSearch parent_search; + Oid parent = InvalidOid, + partition = InvalidOid; ArrayType *arr = PG_GETARG_ARRAYTYPE_P(0); - Oid *partitions; + Oid *parts; + int nparts; + Datum *datums; bool *nulls; - int nparts; int16 typlen; bool typbyval; char typalign; + + PartRelationInfo *prel; + Bound min_bound, + max_bound; + RangeEntry *bounds; + ObjectAddresses *objects = new_object_addresses(); + Snapshot fresh_snapshot; + FmgrInfo finfo; int i; /* Validate array type */ @@ -634,25 +669,29 @@ merge_range_partitions(PG_FUNCTION_ARGS) typlen, typbyval, typalign, &datums, &nulls, &nparts); - /* Extract partition Oids from array */ - partitions = palloc(sizeof(Oid) * nparts); - for (i = 0; i < nparts; i++) - partitions[i] = DatumGetObjectId(datums[i]); - if (nparts < 2) ereport(ERROR, (errmsg("cannot merge partitions"), errdetail("there must be at least two partitions"))); - /* Check if all partitions are from the same parent */ + /* Allocate arrays */ + parts = palloc(nparts * sizeof(Oid)); + bounds = palloc(nparts * sizeof(RangeEntry)); + for (i = 0; i < nparts; i++) { - Oid cur_parent = get_parent_of_partition(partitions[i], &parent_search); + Oid cur_parent; + + /* Extract partition Oids from array */ + parts[i] = DatumGetObjectId(datums[i]); + + /* Check if all partitions are from the same parent */ + cur_parent = get_parent_of_partition(parts[i]); /* If we couldn't find a parent, it's not a partition */ - if (parent_search != PPS_ENTRY_PART_PARENT) + if (!OidIsValid(cur_parent)) ereport(ERROR, (errmsg("cannot merge partitions"), errdetail("relation \"%s\" is not a partition", - 
get_rel_name_or_relid(partitions[i])))); + get_rel_name_or_relid(parts[i])))); /* 'parent' is not initialized */ if (parent == InvalidOid) @@ -664,72 +703,57 @@ merge_range_partitions(PG_FUNCTION_ARGS) errdetail("all relations must share the same parent"))); } - /* Now merge partitions */ - merge_range_partitions_internal(parent, partitions, nparts); - - PG_RETURN_VOID(); -} + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); -static void -merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) -{ - const PartRelationInfo *prel; - List *rentry_list = NIL; - RangeEntry *ranges, - *first, - *last; - FmgrInfo cmp_proc; - int i; + /* Prevent modification of partitions */ + for (i = 0; i < nparts; i++) + LockRelationOid(parts[i], AccessExclusiveLock); + /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); - /* Fetch ranges array */ - ranges = PrelGetRangesArray(prel); - - /* Lock parent till transaction's end */ - LockRelationOid(parent, ShareUpdateExclusiveLock); - - /* Process partitions */ + /* Copy rentries from 'prel' */ for (i = 0; i < nparts; i++) { - int j; - - /* Prevent modification of partitions */ - LockRelationOid(parts[0], AccessExclusiveLock); + uint32 idx = PrelHasPartition(prel, parts[i]); + Assert(idx > 0); - /* Look for the specified partition */ - for (j = 0; j < PrelChildrenCount(prel); j++) - if (ranges[j].child_oid == parts[i]) - { - rentry_list = lappend(rentry_list, &ranges[j]); - break; - } + bounds[i] = PrelGetRangesArray(prel)[idx - 1]; } - /* Check that partitions are adjacent */ - check_range_adjacence(prel->cmp_proc, prel->ev_collid, rentry_list); + /* Sort rentries by increasing bound */ + qsort_range_entries(bounds, nparts, prel); - /* First determine the bounds of a new constraint */ - first = (RangeEntry *) linitial(rentry_list); - last = (RangeEntry *) llast(rentry_list); + 
fmgr_info(prel->cmp_proc, &finfo); - /* Swap ranges if 'last' < 'first' */ - fmgr_info(prel->cmp_proc, &cmp_proc); - if (cmp_bounds(&cmp_proc, prel->ev_collid, &last->min, &first->min) < 0) + /* Check that partitions are adjacent */ + for (i = 1; i < nparts; i++) { - RangeEntry *tmp = last; + Bound cur_min = bounds[i].min, + prev_max = bounds[i - 1].max; - last = first; - first = tmp; + if (cmp_bounds(&finfo, prel->ev_collid, &cur_min, &prev_max) != 0) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partitions \"%s\" and \"%s\" are not adjacent", + get_rel_name(bounds[i - 1].child_oid), + get_rel_name(bounds[i].child_oid)))); + } } + /* First determine the bounds of a new constraint */ + min_bound = bounds[0].min; + max_bound = bounds[nparts - 1].max; + partition = parts[0]; + /* Drop old constraint and create a new one */ - modify_range_constraint(parts[0], + modify_range_constraint(partition, prel->expr_cstr, prel->ev_type, - &first->min, - &last->max); + &min_bound, + &max_bound); /* Make constraint visible */ CommandCounterIncrement(); @@ -737,53 +761,97 @@ merge_range_partitions_internal(Oid parent, Oid *parts, uint32 nparts) if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "could not connect using SPI"); + /* + * Get latest snapshot to see data that might have been + * added to partitions before this transaction has started, + * but was committed a moment before we acquired the locks. 
+ */ + fresh_snapshot = RegisterSnapshot(GetLatestSnapshot()); + /* Migrate the data from all partition to the first one */ for (i = 1; i < nparts; i++) { + ObjectAddress object; + char *query = psprintf("WITH part_data AS ( " - "DELETE FROM %s RETURNING " + "DELETE FROM %1$s RETURNING " "*) " - "INSERT INTO %s SELECT * FROM part_data", + "INSERT INTO %2$s SELECT * FROM part_data", get_qualified_rel_name(parts[i]), get_qualified_rel_name(parts[0])); - SPI_exec(query, 0); + SPIPlanPtr plan = SPI_prepare(query, 0, NULL); + + if (!plan) + elog(ERROR, "%s: SPI_prepare returned %d", + __FUNCTION__, SPI_result); + + SPI_execute_snapshot(plan, NULL, NULL, + fresh_snapshot, + InvalidSnapshot, + false, true, 0); + pfree(query); + + /* To be deleted */ + ObjectAddressSet(object, RelationRelationId, parts[i]); + add_exact_object_address(&object, objects); } + /* Free snapshot */ + UnregisterSnapshot(fresh_snapshot); + SPI_finish(); /* Drop obsolete partitions */ - for (i = 1; i < nparts; i++) - drop_table_by_oid(parts[i]); -} + performMultipleDeletions(objects, DROP_CASCADE, 0); + free_object_addresses(objects); + + pfree(bounds); + pfree(parts); + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_OID(partition); +} /* * Drops partition and expands the next partition - * so that it could cover the dropped one + * so that it could cover the dropped one. * - * This function was written in order to support Oracle-like ALTER TABLE ... - * DROP PARTITION. In Oracle partitions only have upper bound and when - * partition is dropped the next one automatically covers freed range + * This function was written in order to support + * Oracle-like ALTER TABLE ... DROP PARTITION. + * + * In Oracle partitions only have upper bound and when partition + * is dropped the next one automatically covers freed range. 
*/ Datum drop_range_partition_expand_next(PG_FUNCTION_ARGS) { - const PartRelationInfo *prel; - PartParentSearch parent_search; - Oid relid = PG_GETARG_OID(0), - parent; - RangeEntry *ranges; - int i; + Oid partition = PG_GETARG_OID(0), + parent; + PartRelationInfo *prel; + ObjectAddress object; + RangeEntry *ranges; + int i; + + check_relation_oid(partition); + + /* Lock the partition we're going to drop */ + LockRelationOid(partition, AccessExclusiveLock); /* Get parent's relid */ - parent = get_parent_of_partition(relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) - elog(ERROR, "relation \"%s\" is not a partition", - get_rel_name_or_relid(relid)); + parent = get_parent_of_partition(partition); + if (!OidIsValid(parent)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition)))); - /* Fetch PartRelationInfo and perform some checks */ + /* Prevent changes in partitioning scheme */ + LockRelationOid(parent, ShareUpdateExclusiveLock); + + /* Emit an error if it is not partitioned by RANGE */ prel = get_pathman_relation_info(parent); shout_if_prel_is_invalid(parent, prel, PT_RANGE); @@ -791,136 +859,282 @@ drop_range_partition_expand_next(PG_FUNCTION_ARGS) ranges = PrelGetRangesArray(prel); /* Looking for partition in child relations */ - for (i = 0; i < PrelChildrenCount(prel); i++) - if (ranges[i].child_oid == relid) - break; - - /* - * It must be in ranges array because we already - * know that this table is a partition - */ - Assert(i < PrelChildrenCount(prel)); + i = PrelHasPartition(prel, partition) - 1; + Assert(i >= 0 && i < PrelChildrenCount(prel)); /* Expand next partition if it exists */ - if (i < PrelChildrenCount(prel) - 1) + if (i < PrelLastChild(prel)) { - RangeEntry *cur = &ranges[i], + RangeEntry *cur = &ranges[i], *next = &ranges[i + 1]; + Oid next_partition = next->child_oid; + LOCKMODE lockmode = AccessExclusiveLock; + + /* Lock next partition 
*/ + LockRelationOid(next_partition, lockmode); - /* Drop old constraint and create a new one */ - modify_range_constraint(next->child_oid, - prel->expr_cstr, - prel->ev_type, - &cur->min, - &next->max); + /* Does next partition exist? */ + if (SearchSysCacheExists1(RELOID, ObjectIdGetDatum(next_partition))) + { + /* Stretch next partition to cover range */ + modify_range_constraint(next_partition, + prel->expr_cstr, + prel->ev_type, + &cur->min, + &next->max); + } + /* Bad luck, unlock missing partition */ + else UnlockRelationOid(next_partition, lockmode); } - /* Finally drop this partition */ - drop_table_by_oid(relid); + /* Drop partition */ + ObjectAddressSet(object, RelationRelationId, partition); + performDeletion(&object, DROP_CASCADE, 0); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); PG_RETURN_VOID(); } + /* - * Takes text representation of interval value and checks - * if it corresponds to partitioning expression. - * NOTE: throws an ERROR if it fails to convert text to Datum. + * ------------------------ + * Various useful getters + * ------------------------ + */ + +/* + * Returns range entry (min, max) (in form of array). + * + * arg #1 is the parent's Oid. + * arg #2 is the partition's Oid. 
*/ Datum -validate_interval_value(PG_FUNCTION_ARGS) +get_part_range_by_oid(PG_FUNCTION_ARGS) { -#define ARG_PARTREL 0 -#define ARG_EXPRESSION 1 -#define ARG_PARTTYPE 2 -#define ARG_RANGE_INTERVAL 3 -#define ARG_EXPRESSION_P 4 + Oid partition_relid, + parent_relid; + Oid arg_type; + RangeEntry *ranges; + PartRelationInfo *prel; + uint32 idx; - Oid partrel; - PartType parttype; - char *expr_cstr; - Oid expr_type; - - if (PG_ARGISNULL(ARG_PARTREL)) + if (!PG_ARGISNULL(0)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'partrel' should not be NULL"))); + partition_relid = PG_GETARG_OID(0); } - else partrel = PG_GETARG_OID(ARG_PARTREL); + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); - /* Check that relation exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(partrel))) - elog(ERROR, "relation \"%u\" does not exist", partrel); + parent_relid = get_parent_of_partition(partition_relid); + if (!OidIsValid(parent_relid)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a partition", + get_rel_name_or_relid(partition_relid)))); - if (PG_ARGISNULL(ARG_EXPRESSION)) - { + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + + /* Check type of 'dummy' (for correct output) */ + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 1); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'expression' should not be NULL"))); + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->ev_type))))); + + ranges = PrelGetRangesArray(prel); + + /* Look for the specified partition */ + if ((idx = PrelHasPartition(prel, partition_relid)) > 0) + { + ArrayType *arr; + Bound elems[2]; + + elems[0] = ranges[idx - 1].min; + elems[1] = ranges[idx - 1].max; 
+ + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + + PG_RETURN_ARRAYTYPE_P(arr); } - else expr_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION)); - if (PG_ARGISNULL(ARG_PARTTYPE)) + /* No partition found, report error */ + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" has no partition \"%s\"", + get_rel_name_or_relid(parent_relid), + get_rel_name_or_relid(partition_relid)))); + + PG_RETURN_NULL(); /* keep compiler happy */ +} + +/* + * Returns N-th range entry (min, max) (in form of array). + * + * arg #1 is the parent's Oid. + * arg #2 is the index of the range + * (if it is negative then the last range will be returned). + */ +Datum +get_part_range_by_idx(PG_FUNCTION_ARGS) +{ + Oid parent_relid; + int partition_idx = 0; + Oid arg_type; + Bound elems[2]; + RangeEntry *ranges; + PartRelationInfo *prel; + ArrayType *arr; + + if (!PG_ARGISNULL(0)) { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("'parttype' should not be NULL"))); + parent_relid = PG_GETARG_OID(0); } - else parttype = DatumGetPartType(PG_GETARG_DATUM(ARG_PARTTYPE)); + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'parent_relid' should not be NULL"))); - /* - * Fetch partitioning expression's type using - * either user's expression or parsed expression. 
- */ - if (PG_ARGISNULL(ARG_EXPRESSION_P)) + if (!PG_ARGISNULL(1)) { - Datum expr_datum; + partition_idx = PG_GETARG_INT32(1); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_idx' should not be NULL"))); + + /* Emit an error if it is not partitioned by RANGE */ + prel = get_pathman_relation_info(parent_relid); + shout_if_prel_is_invalid(parent_relid, prel, PT_RANGE); + + /* Check type of 'dummy' (for correct output) */ + arg_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + if (getBaseType(arg_type) != getBaseType(prel->ev_type)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("pg_typeof(dummy) should be %s", + format_type_be(getBaseType(prel->ev_type))))); - /* We'll have to parse expression with our own hands */ - expr_datum = cook_partitioning_expression(partrel, expr_cstr, &expr_type); - /* Free both expressions */ - pfree(DatumGetPointer(expr_datum)); - pfree(expr_cstr); + /* Now we have to deal with 'idx' */ + if (partition_idx < -1) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("negative indices other than -1" + " (last partition) are not allowed"))); + } + else if (partition_idx == -1) + { + partition_idx = PrelLastChild(prel); } - else + else if (((uint32) abs(partition_idx)) >= PrelChildrenCount(prel)) { - char *expr_p_cstr; + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("partition #%d does not exist (total amount is %u)", + partition_idx, PrelChildrenCount(prel)))); + } + + ranges = PrelGetRangesArray(prel); + + /* Build args for construct_infinitable_array() */ + elems[0] = ranges[partition_idx].min; + elems[1] = ranges[partition_idx].max; + + arr = construct_bounds_array(elems, 2, + prel->ev_type, + prel->ev_len, + prel->ev_byval, + prel->ev_align); + + /* Don't forget to close 'prel'! 
*/ + close_pathman_relation_info(prel); + + PG_RETURN_ARRAYTYPE_P(arr); +} + - /* Good, let's use a cached parsed expression */ - expr_p_cstr = TextDatumGetCString(PG_GETARG_TEXT_P(ARG_EXPRESSION_P)); - expr_type = exprType(stringToNode(expr_p_cstr)); +/* + * ------------------------ + * Useful string builders + * ------------------------ + */ + +/* Build range condition for a CHECK CONSTRAINT. */ +Datum +build_range_condition(PG_FUNCTION_ARGS) +{ + Oid partition_relid; + char *expression; + Node *expr; + + Bound min, + max; + Oid bounds_type = get_fn_expr_argtype(fcinfo->flinfo, 2); + Constraint *con; + char *result; - /* Free both expressions */ - pfree(expr_p_cstr); - pfree(expr_cstr); + if (!PG_ARGISNULL(0)) + { + partition_relid = PG_GETARG_OID(0); } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' should not be NULL"))); + if (partition_relid < FirstNormalObjectId) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'partition_relid' must be normal object oid"))); - /* - * NULL interval is fine for both HASH and RANGE. - * But for RANGE we need to make some additional checks. - */ - if (!PG_ARGISNULL(ARG_RANGE_INTERVAL)) + if (!PG_ARGISNULL(1)) { - Datum interval_text = PG_GETARG_DATUM(ARG_RANGE_INTERVAL), - interval_value; - Oid interval_type; + expression = TextDatumGetCString(PG_GETARG_DATUM(1)); + } + else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("'expression' should not be NULL")));; - if (parttype == PT_HASH) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("interval should be NULL for HASH partitioned table"))); + /* lock the partition */ + LockRelationOid(partition_relid, ShareUpdateExclusiveLock); + min = PG_ARGISNULL(2) ? 
+ MakeBoundInf(MINUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(2)); - /* Try converting textual representation */ - interval_value = extract_binary_interval_from_text(interval_text, - expr_type, - &interval_type); + max = PG_ARGISNULL(3) ? + MakeBoundInf(PLUS_INFINITY) : + MakeBound(PG_GETARG_DATUM(3)); - /* Check that interval isn't trivial */ - if (interval_is_trivial(expr_type, interval_value, interval_type)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("interval should not be trivial"))); - } + expr = parse_partitioning_expression(partition_relid, expression, NULL, NULL); + con = build_range_check_constraint(partition_relid, + expr, + &min, &max, + bounds_type); - PG_RETURN_BOOL(true); + result = deparse_constraint(partition_relid, con->raw_expr); + + PG_RETURN_TEXT_P(cstring_to_text(result)); +} + +/* Build name for sequence for auto partition naming */ +Datum +build_sequence_name(PG_FUNCTION_ARGS) +{ + Oid parent_relid = PG_GETARG_OID(0); + Oid parent_nsp; + char *seq_name; + char *result; + + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(parent_relid))) + ereport(ERROR, (errmsg("relation \"%u\" does not exist", parent_relid))); + + parent_nsp = get_rel_namespace(parent_relid); + seq_name = build_sequence_name_relid_internal(parent_relid); + + result = psprintf("%s.%s", + quote_identifier(get_namespace_name(parent_nsp)), + quote_identifier(seq_name)); + + PG_RETURN_TEXT_P(cstring_to_text(result)); } @@ -1110,12 +1324,18 @@ modify_range_constraint(Oid partition_relid, /* * Transform constraint into cstring + * + * In >=13 (5815696bc66) result type of addRangeTableEntryForRelationCompat() was changed */ static char * deparse_constraint(Oid relid, Node *expr) { Relation rel; +#if PG_VERSION_NUM >= 130000 + ParseNamespaceItem *nsitem; +#else RangeTblEntry *rte; +#endif Node *cooked_expr; ParseState *pstate; List *context; @@ -1123,12 +1343,17 @@ deparse_constraint(Oid relid, Node *expr) context = 
deparse_context_for(get_rel_name(relid), relid); - rel = heap_open(relid, NoLock); + rel = heap_open_compat(relid, NoLock); /* Initialize parse state */ pstate = make_parsestate(NULL); - rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true); +#if PG_VERSION_NUM >= 130000 + nsitem = addRangeTableEntryForRelationCompat(pstate, rel, AccessShareLock, NULL, false, true); + addNSItemToQuery(pstate, nsitem, true, true, true); +#else + rte = addRangeTableEntryForRelationCompat(pstate, rel, AccessShareLock, NULL, false, true); addRTEtoQuery(pstate, rte, true, true, true); +#endif /* Transform constraint into executable expression (i.e. cook it) */ cooked_expr = transformExpr(pstate, expr, EXPR_KIND_CHECK_CONSTRAINT); @@ -1136,24 +1361,24 @@ deparse_constraint(Oid relid, Node *expr) /* Transform expression into string */ result = deparse_expression(cooked_expr, context, false, false); - heap_close(rel, NoLock); + heap_close_compat(rel, NoLock); return result; } /* - * Build an 1d array of Bound elements + * Build an 1d array of Bound elements. * - * The main difference from construct_array() is that - * it will substitute infinite values with NULLs + * The main difference from construct_array() is that + * it will substitute infinite values with NULLs. 
*/ static ArrayType * -construct_infinitable_array(Bound *elems, - int nelems, - Oid elemtype, - int elemlen, - bool elembyval, - char elemalign) +construct_bounds_array(Bound *elems, + int nelems, + Oid elemtype, + int elemlen, + bool elembyval, + char elemalign) { ArrayType *arr; Datum *datums; @@ -1180,74 +1405,3 @@ construct_infinitable_array(Bound *elems, return arr; } - -/* - * Check that range entries are adjacent - */ -static void -check_range_adjacence(Oid cmp_proc, Oid collid, List *ranges) -{ - ListCell *lc; - RangeEntry *last = NULL; - FmgrInfo finfo; - - fmgr_info(cmp_proc, &finfo); - - foreach(lc, ranges) - { - RangeEntry *cur = (RangeEntry *) lfirst(lc); - - /* Skip first iteration */ - if (!last) - { - last = cur; - continue; - } - - /* Check that last and current partitions are adjacent */ - if ((cmp_bounds(&finfo, collid, &last->max, &cur->min) != 0) && - (cmp_bounds(&finfo, collid, &cur->max, &last->min) != 0)) - { - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("partitions \"%s\" and \"%s\" are not adjacent", - get_rel_name(last->child_oid), - get_rel_name(cur->child_oid)))); - } - - last = cur; - } -} - -/* - * Return palloced fully qualified relation name as a cstring - */ -static char * -get_qualified_rel_name(Oid relid) -{ - Oid nspid = get_rel_namespace(relid); - - return psprintf("%s.%s", - quote_identifier(get_namespace_name(nspid)), - quote_identifier(get_rel_name(relid))); -} - -/* - * Drop table using it's Oid - */ -static void -drop_table_by_oid(Oid relid) -{ - DropStmt *n = makeNode(DropStmt); - const char *relname = get_qualified_rel_name(relid); - - n->removeType = OBJECT_TABLE; - n->missing_ok = false; - n->objects = list_make1(stringToQualifiedNameList(relname)); -#if PG_VERSION_NUM < 100000 - n->arguments = NIL; -#endif - n->behavior = DROP_RESTRICT; /* default behavior */ - n->concurrent = false; - - RemoveRelations(n); -} diff --git a/src/planner_tree_modification.c b/src/planner_tree_modification.c index 
02f20f51..5b6a7982 100644 --- a/src/planner_tree_modification.c +++ b/src/planner_tree_modification.c @@ -3,42 +3,133 @@ * planner_tree_modification.c * Functions for query- and plan- tree modification * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * ------------------------------------------------------------------------ */ -#include "compat/expand_rte_hook.h" -#include "compat/relation_tags.h" #include "compat/rowmarks_fix.h" +#include "declarative.h" #include "partition_filter.h" +#include "partition_router.h" +#include "partition_overseer.h" #include "planner_tree_modification.h" +#include "relation_info.h" #include "rewrite/rewriteManip.h" +#if PG_VERSION_NUM >= 120000 +#include "access/table.h" +#endif #include "access/htup_details.h" +#include "foreign/fdwapi.h" #include "miscadmin.h" #include "optimizer/clauses.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif #include "storage/lmgr.h" #include "utils/syscache.h" +/* + * Drop conflicting macros for the sake of TRANSFORM_CONTEXT_FIELD(...). + * For instance, Windows.h contains a nasty "#define DELETE". 
+ */ +#ifdef SELECT +#undef SELECT +#endif + +#ifdef INSERT +#undef INSERT +#endif + +#ifdef UPDATE +#undef UPDATE +#endif + +#ifdef DELETE +#undef DELETE +#endif + + /* for assign_rel_parenthood_status() */ #define PARENTHOOD_TAG CppAsString(PARENTHOOD) +/* Build transform_query_cxt field name */ +#define TRANSFORM_CONTEXT_FIELD(command_type) \ + has_parent_##command_type##_query + +/* Check that transform_query_cxt field is TRUE */ +#define TRANSFORM_CONTEXT_HAS_PARENT(context, command_type) \ + ( (context)->TRANSFORM_CONTEXT_FIELD(command_type) ) + +/* Used in switch(CmdType) statements */ +#define TRANSFORM_CONTEXT_SWITCH_SET(context, command_type) \ + case CMD_##command_type: \ + (context)->TRANSFORM_CONTEXT_FIELD(command_type) = true; \ + break; \ + +#define TRANSFORM_CONTEXT_QUERY_IS_CTE_CTE(context, query) \ + ( (context)->parent_cte && \ + (context)->parent_cte->ctequery == (Node *) (query) ) + +#define TRANSFORM_CONTEXT_QUERY_IS_CTE_SL(context, query) \ + ( (context)->parent_sublink && \ + (context)->parent_sublink->subselect == (Node *) (query) && \ + (context)->parent_sublink->subLinkType == CTE_SUBLINK ) + +/* Check if 'query' is CTE according to 'context' */ +#define TRANSFORM_CONTEXT_QUERY_IS_CTE(context, query) \ + ( TRANSFORM_CONTEXT_QUERY_IS_CTE_CTE((context), (query)) || \ + TRANSFORM_CONTEXT_QUERY_IS_CTE_SL ((context), (query)) ) + +typedef struct +{ + /* Do we have a parent CmdType query? 
*/ + bool TRANSFORM_CONTEXT_FIELD(SELECT), + TRANSFORM_CONTEXT_FIELD(INSERT), + TRANSFORM_CONTEXT_FIELD(UPDATE), + TRANSFORM_CONTEXT_FIELD(DELETE); + + /* Parameters for handle_modification_query() */ + ParamListInfo query_params; + + /* SubLink that might contain an examined query */ + SubLink *parent_sublink; + + /* CommonTableExpr that might contain an examined query */ + CommonTableExpr *parent_cte; +} transform_query_cxt; + +typedef struct +{ + Index child_varno; + Oid parent_relid, + parent_reltype, + child_reltype; + List *translated_vars; +} adjust_appendrel_varnos_cxt; static bool pathman_transform_query_walker(Node *node, void *context); +static bool pathman_post_analyze_query_walker(Node *node, void *context); -static void disable_standard_inheritance(Query *parse); -static void handle_modification_query(Query *parse, ParamListInfo params); +static void disable_standard_inheritance(Query *parse, transform_query_cxt *context); +static void handle_modification_query(Query *parse, transform_query_cxt *context); -static void partition_filter_visitor(Plan *plan, void *context); +static Plan *partition_filter_visitor(Plan *plan, void *context); +static Plan *partition_router_visitor(Plan *plan, void *context); -static rel_parenthood_status tag_extract_parenthood_status(List *relation_tag); +static void state_visit_subplans(List *plans, void (*visitor) (PlanState *plan, void *context), void *context); +static void state_visit_members(PlanState **planstates, int nplans, void (*visitor) (PlanState *plan, void *context), void *context); +static Oid find_deepest_partition(Oid relid, Index rti, Expr *quals); static Node *eval_extern_params_mutator(Node *node, ParamListInfo params); +static Node *adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context); +static bool inh_translation_list_is_trivial(List *translated_vars); +static bool modifytable_contains_fdw(List *rtable, ModifyTable *node); /* @@ -46,13 +137,13 @@ static Node 
*eval_extern_params_mutator(Node *node, ParamListInfo params); * id in order to recognize them properly. */ #define QUERY_ID_INITIAL 0 -static uint32 latest_query_id = QUERY_ID_INITIAL; +static uint64 latest_query_id = QUERY_ID_INITIAL; void assign_query_id(Query *query) { - uint32 prev_id = latest_query_id++; + uint64 prev_id = latest_query_id++; if (prev_id > latest_query_id) elog(WARNING, "assign_query_id(): queryId overflow"); @@ -68,19 +159,19 @@ reset_query_id_generator(void) /* - * Basic plan tree walker + * Basic plan tree walker. * - * 'visitor' is applied right before return + * 'visitor' is applied right before return. */ -void -plan_tree_walker(Plan *plan, - void (*visitor) (Plan *plan, void *context), - void *context) +Plan * +plan_tree_visitor(Plan *plan, + Plan *(*visitor) (Plan *plan, void *context), + void *context) { ListCell *l; if (plan == NULL) - return; + return NULL; check_stack_depth(); @@ -88,45 +179,154 @@ plan_tree_walker(Plan *plan, switch (nodeTag(plan)) { case T_SubqueryScan: - plan_tree_walker(((SubqueryScan *) plan)->subplan, visitor, context); + plan_tree_visitor(((SubqueryScan *) plan)->subplan, visitor, context); break; case T_CustomScan: - foreach(l, ((CustomScan *) plan)->custom_plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + foreach (l, ((CustomScan *) plan)->custom_plans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; +#if PG_VERSION_NUM < 140000 /* reworked in commit 86dc90056dfd */ case T_ModifyTable: foreach (l, ((ModifyTable *) plan)->plans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; +#endif + + case T_Append: + foreach (l, ((Append *) plan)->appendplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); break; - /* Since they look alike */ case T_MergeAppend: + foreach (l, ((MergeAppend *) plan)->mergeplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; + + case 
T_BitmapAnd: + foreach (l, ((BitmapAnd *) plan)->bitmapplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; + + case T_BitmapOr: + foreach (l, ((BitmapOr *) plan)->bitmapplans) + plan_tree_visitor((Plan *) lfirst(l), visitor, context); + break; + + default: + break; + } + + plan_tree_visitor(plan->lefttree, visitor, context); + plan_tree_visitor(plan->righttree, visitor, context); + + /* Apply visitor to the current node */ + return visitor(plan, context); +} + +void +state_tree_visitor(PlanState *state, + void (*visitor) (PlanState *plan, void *context), + void *context) +{ + Plan *plan; + ListCell *lc; + + if (state == NULL) + return; + + plan = state->plan; + + check_stack_depth(); + + /* Plan-type-specific fixes */ + switch (nodeTag(plan)) + { + case T_SubqueryScan: + state_tree_visitor(((SubqueryScanState *) state)->subplan, visitor, context); + break; + + case T_CustomScan: + foreach (lc, ((CustomScanState *) state)->custom_ps) + state_tree_visitor((PlanState *) lfirst(lc), visitor, context); + break; + +#if PG_VERSION_NUM < 140000 /* reworked in commit 86dc90056dfd */ + case T_ModifyTable: + state_visit_members(((ModifyTableState *) state)->mt_plans, + ((ModifyTableState *) state)->mt_nplans, + visitor, context); + break; +#endif + case T_Append: - foreach(l, ((Append *) plan)->appendplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + state_visit_members(((AppendState *) state)->appendplans, + ((AppendState *) state)->as_nplans, + visitor, context); + break; + + case T_MergeAppend: + state_visit_members(((MergeAppendState *) state)->mergeplans, + ((MergeAppendState *) state)->ms_nplans, + visitor, context); break; case T_BitmapAnd: - foreach(l, ((BitmapAnd *) plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + state_visit_members(((BitmapAndState *) state)->bitmapplans, + ((BitmapAndState *) state)->nplans, + visitor, context); break; case T_BitmapOr: - foreach(l, ((BitmapOr *) 
plan)->bitmapplans) - plan_tree_walker((Plan *) lfirst(l), visitor, context); + state_visit_members(((BitmapOrState *) state)->bitmapplans, + ((BitmapOrState *) state)->nplans, + visitor, context); break; default: break; } - plan_tree_walker(plan->lefttree, visitor, context); - plan_tree_walker(plan->righttree, visitor, context); + state_visit_subplans(state->initPlan, visitor, context); + state_visit_subplans(state->subPlan, visitor, context); + + state_tree_visitor(state->lefttree, visitor, context); + state_tree_visitor(state->righttree, visitor, context); /* Apply visitor to the current node */ - visitor(plan, context); + visitor(state, context); +} + +/* + * Walk a list of SubPlans (or initPlans, which also use SubPlan nodes). + */ +static void +state_visit_subplans(List *plans, + void (*visitor) (PlanState *plan, void *context), + void *context) +{ + ListCell *lc; + + foreach (lc, plans) + { + SubPlanState *sps = lfirst_node(SubPlanState, lc); + state_tree_visitor(sps->planstate, visitor, context); + } +} + +/* + * Walk the constituent plans of a ModifyTable, Append, MergeAppend, + * BitmapAnd, or BitmapOr node. 
+ */ +static void +state_visit_members(PlanState **planstates, int nplans, + void (*visitor) (PlanState *plan, void *context), void *context) +{ + int i; + + for (i = 0; i < nplans; i++) + state_tree_visitor(planstates[i], visitor, context); } @@ -140,7 +340,19 @@ plan_tree_walker(Plan *plan, void pathman_transform_query(Query *parse, ParamListInfo params) { - pathman_transform_query_walker((Node *) parse, (void *) params); + transform_query_cxt context; + + /* Initialize context */ + memset((void *) &context, 0, sizeof(context)); + context.query_params = params; + + pathman_transform_query_walker((Node *) parse, (void *) &context); +} + +void +pathman_post_analyze_query(Query *parse) +{ + pathman_post_analyze_query_walker((Node *) parse, NULL); } /* Walker for pathman_transform_query() */ @@ -150,22 +362,63 @@ pathman_transform_query_walker(Node *node, void *context) if (node == NULL) return false; + else if (IsA(node, SubLink) || IsA(node, CommonTableExpr)) + { + transform_query_cxt *current_context = context, + next_context; + + /* Initialize next context for bottom subqueries */ + next_context = *current_context; + + if (IsA(node, SubLink)) + { + next_context.parent_sublink = (SubLink *) node; + next_context.parent_cte = NULL; + } + else + { + next_context.parent_sublink = NULL; + next_context.parent_cte = (CommonTableExpr *) node; + } + + /* Handle expression subtree */ + return expression_tree_walker(node, + pathman_transform_query_walker, + (void *) &next_context); + } + else if (IsA(node, Query)) { - Query *query = (Query *) node; + Query *query = (Query *) node; + transform_query_cxt *current_context = context, + next_context; + + /* Initialize next context for bottom subqueries */ + next_context = *current_context; + switch (query->commandType) + { + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, SELECT); + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, INSERT); + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, UPDATE); + TRANSFORM_CONTEXT_SWITCH_SET(&next_context, 
DELETE); + + default: + break; + } + next_context.parent_sublink = NULL; + next_context.parent_cte = NULL; /* Assign Query a 'queryId' */ assign_query_id(query); /* Apply Query tree modifiers */ - rowmark_add_tableoids(query); - disable_standard_inheritance(query); - handle_modification_query(query, (ParamListInfo) context); + disable_standard_inheritance(query, current_context); + handle_modification_query(query, current_context); /* Handle Query node */ return query_tree_walker(query, pathman_transform_query_walker, - context, + (void *) &next_context, 0); } @@ -175,6 +428,33 @@ pathman_transform_query_walker(Node *node, void *context) context); } +static bool +pathman_post_analyze_query_walker(Node *node, void *context) +{ + if (node == NULL) + return false; + + else if (IsA(node, Query)) + { + Query *query = (Query *) node; + + /* Make changes for declarative syntax */ +#ifdef ENABLE_DECLARATIVE + modify_declarative_partitioning_query(query); +#endif + + /* Handle Query node */ + return query_tree_walker(query, + pathman_post_analyze_query_walker, + context, + 0); + } + + /* Handle expression subtree */ + return expression_tree_walker(node, + pathman_post_analyze_query_walker, + context); +} /* * ---------------------- @@ -184,18 +464,21 @@ pathman_transform_query_walker(Node *node, void *context) /* Disable standard inheritance if table is partitioned by pg_pathman */ static void -disable_standard_inheritance(Query *parse) +disable_standard_inheritance(Query *parse, transform_query_cxt *context) { ListCell *lc; Index current_rti; /* current range table entry index */ -/* - * We can't handle non-SELECT queries unless - * there's a pathman_expand_inherited_rtentry_hook() - */ -#ifndef NATIVE_EXPAND_RTE_HOOK +#ifdef LEGACY_ROWMARKS_95 + /* Don't process non-SELECT queries */ if (parse->commandType != CMD_SELECT) return; + + /* Don't process queries under UPDATE or DELETE (except for CTEs) */ + if ((TRANSFORM_CONTEXT_HAS_PARENT(context, UPDATE) || + 
TRANSFORM_CONTEXT_HAS_PARENT(context, DELETE)) && + !TRANSFORM_CONTEXT_QUERY_IS_CTE(context, parse)) + return; #endif /* Walk through RangeTblEntries list */ @@ -211,203 +494,343 @@ disable_standard_inheritance(Query *parse) if (rte->rtekind != RTE_RELATION || rte->relkind != RELKIND_RELATION || parse->resultRelation == current_rti) /* is it a result relation? */ + { +#if PG_VERSION_NUM >= 150000 /* for commit 7103ebb7aae8 */ + if (parse->commandType == CMD_MERGE && + (rte->rtekind == RTE_RELATION || + rte->relkind == RELKIND_RELATION) && + rte->inh && has_pathman_relation_info(rte->relid)) + elog(ERROR, "pg_pathman doesn't support MERGE command yet"); +#endif + continue; + } /* Table may be partitioned */ if (rte->inh) { - const PartRelationInfo *prel; +#ifdef LEGACY_ROWMARKS_95 + /* Don't process queries with RowMarks on 9.5 */ + if (get_parse_rowmark(parse, current_rti)) + continue; +#endif /* Proceed if table is partitioned by pg_pathman */ - if ((prel = get_pathman_relation_info(rte->relid)) != NULL) + if (has_pathman_relation_info(rte->relid)) { - /* - * HACK: unset the 'inh' flag to disable standard - * planning. We'll set it again later. 
- */ + /* HACK: unset the 'inh' flag to disable standard planning */ rte->inh = false; /* Try marking it using PARENTHOOD_ALLOWED */ - assign_rel_parenthood_status(parse->queryId, rte, - PARENTHOOD_ALLOWED); + assign_rel_parenthood_status(rte, PARENTHOOD_ALLOWED); } } /* Else try marking it using PARENTHOOD_DISALLOWED */ - else assign_rel_parenthood_status(parse->queryId, rte, - PARENTHOOD_DISALLOWED); + else assign_rel_parenthood_status(rte, PARENTHOOD_DISALLOWED); } } /* Checks if query affects only one partition */ static void -handle_modification_query(Query *parse, ParamListInfo params) +handle_modification_query(Query *parse, transform_query_cxt *context) { - const PartRelationInfo *prel; - Node *prel_expr; - List *ranges; - RangeTblEntry *rte; - WrapperNode *wrap; - Expr *expr; - WalkerContext context; - Index result_rel; - - /* Fetch index of result relation */ - result_rel = parse->resultRelation; + RangeTblEntry *rte; + Oid child; + Node *quals; + Index result_rti = parse->resultRelation; + ParamListInfo params = context->query_params; /* Exit if it's not a DELETE or UPDATE query */ - if (result_rel == 0 || - (parse->commandType != CMD_UPDATE && - parse->commandType != CMD_DELETE)) + if (result_rti == 0 || (parse->commandType != CMD_UPDATE && + parse->commandType != CMD_DELETE)) return; - rte = rt_fetch(result_rel, parse->rtable); + /* can't set earlier because CMD_UTILITY doesn't have jointree */ + quals = parse->jointree->quals; + rte = rt_fetch(result_rti, parse->rtable); - /* Exit if it's DELETE FROM ONLY table */ - if (!rte->inh) return; + /* Exit if it's ONLY table */ + if (!rte->inh) + return; - prel = get_pathman_relation_info(rte->relid); + /* Check if we can replace PARAMs with CONSTs */ + if (params && clause_contains_params(quals)) + quals = eval_extern_params_mutator(quals, params); - /* Exit if it's not partitioned */ - if (!prel) return; + /* Evaluate constaint expressions */ + quals = eval_const_expressions(NULL, quals); - /* Exit if we 
must include parent */ - if (prel->enable_parent) return; + /* Parse syntax tree and extract deepest partition if possible */ + child = find_deepest_partition(rte->relid, result_rti, (Expr *) quals); - /* Parse syntax tree and extract partition ranges */ - ranges = list_make1_irange_full(prel, IR_COMPLETE); - expr = (Expr *) eval_const_expressions(NULL, parse->jointree->quals); + /* Substitute parent table with partition */ + if (OidIsValid(child)) + { + Relation child_rel, + parent_rel; - /* Exit if there's no expr (no use) */ - if (!expr) return; + LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ - /* Check if we can replace PARAMs with CONSTs */ - if (params && clause_contains_params((Node *) expr)) - expr = (Expr *) eval_extern_params_mutator((Node *) expr, params); + HeapTuple syscache_htup; + char child_relkind; + Oid parent = rte->relid; - /* Prepare partitioning expression */ - prel_expr = PrelExpressionForRelid(prel, result_rel); + List *translated_vars; + adjust_appendrel_varnos_cxt aav_cxt; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *parent_perminfo, + *child_perminfo; +#endif - /* Parse syntax tree and extract partition ranges */ - InitWalkerContext(&context, prel_expr, prel, NULL); - wrap = walk_expr_tree(expr, &context); + /* Lock 'child' table */ + LockRelationOid(child, lockmode); - ranges = irange_list_intersection(ranges, wrap->rangeset); + /* Make sure that 'child' exists */ + syscache_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(child)); + if (HeapTupleIsValid(syscache_htup)) + { + Form_pg_class reltup = (Form_pg_class) GETSTRUCT(syscache_htup); - /* - * If only one partition is affected, - * substitute parent table with the partition. 
- */ - if (irange_list_length(ranges) == 1) + /* Fetch child's relkind and free cache entry */ + child_relkind = reltup->relkind; + ReleaseSysCache(syscache_htup); + } + else + { + UnlockRelationOid(child, lockmode); + return; /* nothing to do here */ + } + +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + parent_perminfo = getRTEPermissionInfo(parse->rteperminfos, rte); +#endif + /* Update RTE's relid and relkind (for FDW) */ + rte->relid = child; + rte->relkind = child_relkind; + +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* Copy parent RTEPermissionInfo. */ + rte->perminfoindex = 0; /* expected by addRTEPermissionInfo() */ + child_perminfo = addRTEPermissionInfo(&parse->rteperminfos, rte); + memcpy(child_perminfo, parent_perminfo, sizeof(RTEPermissionInfo)); + + /* Correct RTEPermissionInfo for child. */ + child_perminfo->relid = child; + child_perminfo->inh = false; +#endif + + /* HACK: unset the 'inh' flag (no children) */ + rte->inh = false; + + /* Both tables are already locked */ + child_rel = heap_open_compat(child, NoLock); + parent_rel = heap_open_compat(parent, NoLock); + + make_inh_translation_list(parent_rel, child_rel, 0, &translated_vars, NULL); + + /* Perform some additional adjustments */ + if (!inh_translation_list_is_trivial(translated_vars)) + { + /* Translate varnos for this child */ + aav_cxt.child_varno = result_rti; + aav_cxt.parent_relid = parent; + aav_cxt.parent_reltype = RelationGetDescr(parent_rel)->tdtypeid; + aav_cxt.child_reltype = RelationGetDescr(child_rel)->tdtypeid; + aav_cxt.translated_vars = translated_vars; + adjust_appendrel_varnos((Node *) parse, &aav_cxt); + +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + child_perminfo->selectedCols = translate_col_privs(parent_perminfo->selectedCols, translated_vars); + child_perminfo->insertedCols = translate_col_privs(parent_perminfo->insertedCols, translated_vars); + child_perminfo->updatedCols = 
translate_col_privs(parent_perminfo->updatedCols, translated_vars); +#else + /* Translate column privileges for this child */ + rte->selectedCols = translate_col_privs(rte->selectedCols, translated_vars); + rte->insertedCols = translate_col_privs(rte->insertedCols, translated_vars); + rte->updatedCols = translate_col_privs(rte->updatedCols, translated_vars); +#endif + } + + /* Close relations (should remain locked, though) */ + heap_close_compat(child_rel, NoLock); + heap_close_compat(parent_rel, NoLock); + } +} + +/* Remap parent's attributes to child ones */ +static Node * +adjust_appendrel_varnos(Node *node, adjust_appendrel_varnos_cxt *context) +{ + if (node == NULL) + return NULL; + + if (IsA(node, Query)) { - IndexRange irange = linitial_irange(ranges); + Query *query = (Query *) node; + ListCell *lc; - /* Exactly one partition (bounds are equal) */ - if (irange_lower(irange) == irange_upper(irange)) + /* FIXME: we might need to reorder TargetEntries */ + foreach (lc, query->targetList) { - Oid *children = PrelGetChildrenArray(prel), - child = children[irange_lower(irange)], - parent = rte->relid; + TargetEntry *te = (TargetEntry *) lfirst(lc); + Var *child_var; - Relation child_rel, - parent_rel; + if (te->resjunk) + continue; - void *tuple_map; /* we don't need the map itself */ + if (te->resno > list_length(context->translated_vars)) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + te->resno, get_rel_name(context->parent_relid)); - LOCKMODE lockmode = RowExclusiveLock; /* UPDATE | DELETE */ + child_var = list_nth(context->translated_vars, te->resno - 1); + if (!child_var) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + te->resno, get_rel_name(context->parent_relid)); - HeapTuple syscache_htup; - char child_relkind; + /* Transform attribute number */ + te->resno = child_var->varattno; + } + + /* NOTE: we shouldn't copy top-level Query */ + return (Node *) query_tree_mutator((Query *) node, + adjust_appendrel_varnos, + 
context, + (QTW_IGNORE_RC_SUBQUERIES | + QTW_DONT_COPY_QUERY)); + } - /* Lock 'child' table */ - LockRelationOid(child, lockmode); + if (IsA(node, Var)) + { + Var *var = (Var *) node; - /* Make sure that 'child' exists */ - syscache_htup = SearchSysCache1(RELOID, ObjectIdGetDatum(child)); - if (HeapTupleIsValid(syscache_htup)) + /* See adjust_appendrel_attrs_mutator() */ + if (var->varno == context->child_varno) + { + if (var->varattno > 0) { - Form_pg_class reltup = (Form_pg_class) GETSTRUCT(syscache_htup); + Var *child_var; + + var = copyObject(var); + + if (var->varattno > list_length(context->translated_vars)) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); + + child_var = list_nth(context->translated_vars, var->varattno - 1); + if (!child_var) + elog(ERROR, "attribute %d of relation \"%s\" does not exist", + var->varattno, get_rel_name(context->parent_relid)); - /* Fetch child's relkind and free cache entry */ - child_relkind = reltup->relkind; - ReleaseSysCache(syscache_htup); + /* Transform attribute number */ + var->varattno = child_var->varattno; } - else + else if (var->varattno == 0) { - UnlockRelationOid(child, lockmode); - return; /* nothing to do here */ - } + ConvertRowtypeExpr *r = makeNode(ConvertRowtypeExpr); - /* Both tables are already locked */ - child_rel = heap_open(child, NoLock); - parent_rel = heap_open(parent, NoLock); + Assert(var->vartype = context->parent_reltype); - /* Build a conversion map (may be trivial, i.e. 
NULL) */ - tuple_map = build_part_tuple_map(parent_rel, child_rel); - if (tuple_map) - free_conversion_map((TupleConversionMap *) tuple_map); + r->arg = (Expr *) var; + r->resulttype = context->parent_reltype; + r->convertformat = COERCE_IMPLICIT_CAST; + r->location = -1; - /* Close relations (should remain locked, though) */ - heap_close(child_rel, NoLock); - heap_close(parent_rel, NoLock); + /* Make sure the Var node has the right type ID, too */ + var->vartype = context->child_reltype; - /* Exit if tuple map was NOT trivial */ - if (tuple_map) /* just checking the pointer! */ - return; + return (Node *) r; + } + } - /* Update RTE's relid and relkind (for FDW) */ - rte->relid = child; - rte->relkind = child_relkind; + return (Node *) var; + } - /* HACK: unset the 'inh' flag (no children) */ - rte->inh = false; - } + if (IsA(node, SubLink)) + { + SubLink *sl = (SubLink *) node; + + /* Examine its expression */ + sl->testexpr = expression_tree_mutator_compat(sl->testexpr, + adjust_appendrel_varnos, + context); + return (Node *) sl; } + + return expression_tree_mutator_compat(node, + adjust_appendrel_varnos, + context); } /* - * ------------------------------- - * PartitionFilter-related stuff - * ------------------------------- + * ---------------------------------------------------- + * PartitionFilter and PartitionRouter -related stuff + * ---------------------------------------------------- */ /* Add PartitionFilter nodes to the plan tree */ -void +Plan * add_partition_filters(List *rtable, Plan *plan) { if (pg_pathman_enable_partition_filter) - plan_tree_walker(plan, partition_filter_visitor, rtable); + return plan_tree_visitor(plan, partition_filter_visitor, rtable); + + return NULL; +} + +/* Add PartitionRouter nodes to the plan tree */ +Plan * +add_partition_routers(List *rtable, Plan *plan) +{ + if (pg_pathman_enable_partition_router) + return plan_tree_visitor(plan, partition_router_visitor, rtable); + + return NULL; } /* - * Add partition filters to 
ModifyTable node's children. + * Add PartitionFilters to ModifyTable node's children. * * 'context' should point to the PlannedStmt->rtable. */ -static void +static Plan * partition_filter_visitor(Plan *plan, void *context) { List *rtable = (List *) context; ModifyTable *modify_table = (ModifyTable *) plan; +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + /* + * We have only one subplan for 14: need to modify it without + * using any cycle + */ + Plan *subplan = outerPlan(modify_table); + ListCell *lc2, + *lc3; +#else ListCell *lc1, *lc2, *lc3; +#endif /* Skip if not ModifyTable with 'INSERT' command */ if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_INSERT) - return; + return NULL; Assert(rtable && IsA(rtable, List)); lc3 = list_head(modify_table->returningLists); - forboth (lc1, modify_table->plans, lc2, modify_table->resultRelations) +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + lc2 = list_head(modify_table->resultRelations); +#else + forboth (lc1, modify_table->plans, + lc2, modify_table->resultRelations) +#endif { - Index rindex = lfirst_int(lc2); - Oid relid = getrelid(rindex, rtable); - const PartRelationInfo *prel = get_pathman_relation_info(relid); + Index rindex = lfirst_int(lc2); + Oid relid = getrelid(rindex, rtable); /* Check that table is partitioned */ - if (prel) + if (has_pathman_relation_info(relid)) { List *returning_list = NIL; @@ -415,15 +838,130 @@ partition_filter_visitor(Plan *plan, void *context) if (lc3) { returning_list = lfirst(lc3); - lc3 = lnext(lc3); +#if PG_VERSION_NUM < 140000 + lc3 = lnext_compat(modify_table->returningLists, lc3); +#endif } - lfirst(lc1) = make_partition_filter((Plan *) lfirst(lc1), - relid, +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + outerPlan(modify_table) = make_partition_filter(subplan, relid, + modify_table->nominalRelation, + modify_table->onConflictAction, + modify_table->operation, + returning_list); +#else + lfirst(lc1) = 
make_partition_filter((Plan *) lfirst(lc1), relid, + modify_table->nominalRelation, modify_table->onConflictAction, + modify_table->operation, returning_list); +#endif } } + + return NULL; +} + +/* + * Add PartitionRouter to ModifyTable node's children. + * + * 'context' should point to the PlannedStmt->rtable. + */ +static Plan * +partition_router_visitor(Plan *plan, void *context) +{ + List *rtable = (List *) context; + ModifyTable *modify_table = (ModifyTable *) plan; +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + /* + * We have only one subplan for 14: need to modify it without + * using any cycle + */ + Plan *subplan = outerPlan(modify_table); + ListCell *lc2, + *lc3; +#else + ListCell *lc1, + *lc2, + *lc3; +#endif + bool changed = false; + + /* Skip if not ModifyTable with 'UPDATE' command */ + if (!IsA(modify_table, ModifyTable) || modify_table->operation != CMD_UPDATE) + return NULL; + + Assert(rtable && IsA(rtable, List)); + + if (modifytable_contains_fdw(rtable, modify_table)) + { + ereport(WARNING, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg(UPDATE_NODE_NAME " does not support foreign data wrappers"))); + return NULL; + } + + lc3 = list_head(modify_table->returningLists); +#if PG_VERSION_NUM >= 140000 /* for changes 86dc90056dfd */ + lc2 = list_head(modify_table->resultRelations); +#else + forboth (lc1, modify_table->plans, + lc2, modify_table->resultRelations) +#endif + { + Index rindex = lfirst_int(lc2); + Oid relid = getrelid(rindex, rtable), + tmp_relid; + + /* Find topmost parent */ + while (OidIsValid(tmp_relid = get_parent_of_partition(relid))) + relid = tmp_relid; + + /* Check that table is partitioned */ + if (has_pathman_relation_info(relid)) + { + List *returning_list = NIL; + Plan *prouter, + *pfilter; + + /* Extract returning list if possible */ + if (lc3) + { + returning_list = lfirst(lc3); +#if PG_VERSION_NUM < 140000 + lc3 = lnext_compat(modify_table->returningLists, lc3); +#endif + } + +#if PG_VERSION_NUM >= 
140000 /* for changes 86dc90056dfd */ + prouter = make_partition_router(subplan, + modify_table->epqParam, + modify_table->nominalRelation); +#else + prouter = make_partition_router((Plan *) lfirst(lc1), + modify_table->epqParam, + modify_table->nominalRelation); +#endif + + pfilter = make_partition_filter((Plan *) prouter, relid, + modify_table->nominalRelation, + ONCONFLICT_NONE, + CMD_UPDATE, + returning_list); + +#if PG_VERSION_NUM >= 140000 /* for changes in 86dc90056dfd */ + outerPlan(modify_table) = pfilter; +#else + lfirst(lc1) = pfilter; +#endif + changed = true; + } + } + + if (changed) + return make_partition_overseer(plan); + + return NULL; } @@ -433,59 +971,132 @@ partition_filter_visitor(Plan *plan, void *context) * ----------------------------------------------- */ +#define RPS_STATUS_ASSIGNED ( (Index) 0x2 ) +#define RPS_ENABLE_PARENT ( (Index) 0x1 ) + /* Set parenthood status (per query level) */ void -assign_rel_parenthood_status(uint32 query_id, - RangeTblEntry *rte, +assign_rel_parenthood_status(RangeTblEntry *rte, rel_parenthood_status new_status) { + Assert(rte->rtekind != RTE_CTE); - List *old_relation_tag; - - old_relation_tag = rte_attach_tag(query_id, rte, - make_rte_tag_int(PARENTHOOD_TAG, - new_status)); - - /* We already have a PARENTHOOD_TAG, examine it's value */ - if (old_relation_tag && - tag_extract_parenthood_status(old_relation_tag) != new_status) - { - elog(ERROR, - "it is prohibited to apply ONLY modifier to partitioned " - "tables which have already been mentioned without ONLY"); - } + /* HACK: set relevant bits in RTE */ + rte->ctelevelsup |= RPS_STATUS_ASSIGNED; + if (new_status == PARENTHOOD_ALLOWED) + rte->ctelevelsup |= RPS_ENABLE_PARENT; } /* Get parenthood status (per query level) */ rel_parenthood_status -get_rel_parenthood_status(uint32 query_id, RangeTblEntry *rte) +get_rel_parenthood_status(RangeTblEntry *rte) { - List *relation_tag; + Assert(rte->rtekind != RTE_CTE); - relation_tag = rte_fetch_tag(query_id, rte, 
PARENTHOOD_TAG); - if (relation_tag) - return tag_extract_parenthood_status(relation_tag); + /* HACK: check relevant bits in RTE */ + if (rte->ctelevelsup & RPS_STATUS_ASSIGNED) + return (rte->ctelevelsup & RPS_ENABLE_PARENT) ? + PARENTHOOD_ALLOWED : + PARENTHOOD_DISALLOWED; /* Not found, return stub value */ return PARENTHOOD_NOT_SET; } -static rel_parenthood_status -tag_extract_parenthood_status(List *relation_tag) + +/* + * -------------------------- + * Various helper functions + * -------------------------- + */ + +/* Does ModifyTable node contain any FDWs? */ +static bool +modifytable_contains_fdw(List *rtable, ModifyTable *node) { - const Value *value; - rel_parenthood_status status; + ListCell *lc; - rte_deconstruct_tag(relation_tag, NULL, &value); - Assert(value && IsA(value, Integer)); + foreach(lc, node->resultRelations) + { + Index rti = lfirst_int(lc); + RangeTblEntry *rte = rt_fetch(rti, rtable); - status = (rel_parenthood_status) intVal(value); - Assert(status >= PARENTHOOD_NOT_SET && - status <= PARENTHOOD_ALLOWED); + if (rte->relkind == RELKIND_FOREIGN_TABLE) + return true; + } - return status; + return false; } +/* + * Find a single deepest subpartition using quals. + * It's always better to narrow down the set of tables to be scanned. + * Return InvalidOid if it's not possible (e.g. table is not partitioned). + */ +static Oid +find_deepest_partition(Oid relid, Index rti, Expr *quals) +{ + PartRelationInfo *prel; + Oid result = InvalidOid; + + /* Exit if there's no quals (no use) */ + if (!quals) + return result; + + /* Try pruning if table is partitioned */ + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + Node *prel_expr; + WalkerContext context; + List *ranges; + WrapperNode *wrap; + + /* Prepare partitioning expression */ + prel_expr = PrelExpressionForRelid(prel, rti); + + /* First we select all available partitions... 
*/ + ranges = list_make1_irange_full(prel, IR_COMPLETE); + + /* Parse syntax tree and extract partition ranges */ + InitWalkerContext(&context, prel_expr, prel, NULL); + wrap = walk_expr_tree(quals, &context); + ranges = irange_list_intersection(ranges, wrap->rangeset); + + switch (irange_list_length(ranges)) + { + /* Scan only parent (don't do constraint elimination) */ + case 0: + result = relid; + break; + + /* Handle the remaining partition */ + case 1: + if (!prel->enable_parent) + { + IndexRange irange = linitial_irange(ranges); + Oid *children = PrelGetChildrenArray(prel), + child = children[irange_lower(irange)]; + + /* Scan this partition */ + result = child; + + /* Try to go deeper and see if there are subpartitions */ + child = find_deepest_partition(child, rti, quals); + if (OidIsValid(child)) + result = child; + } + break; + + default: + break; + } + + /* Don't forget to close 'prel'! */ + close_pathman_relation_info(prel); + } + + return result; +} /* Replace extern param nodes with consts */ static Node * @@ -505,7 +1116,10 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) param->paramid > 0 && param->paramid <= params->numParams) { - ParamExternData *prm = ¶ms->params[param->paramid - 1]; + ParamExternData prmdata; /* storage for 'prm' (PG 11) */ + ParamExternData *prm = CustomEvalParamExternCompat(param, + params, + &prmdata); if (OidIsValid(prm->ptype)) { @@ -541,6 +1155,57 @@ eval_extern_params_mutator(Node *node, ParamListInfo params) } } - return expression_tree_mutator(node, eval_extern_params_mutator, - (void *) params); + return expression_tree_mutator_compat(node, eval_extern_params_mutator, + (void *) params); +} + +/* Check whether Var translation list is trivial (no shuffle) */ +static bool +inh_translation_list_is_trivial(List *translated_vars) +{ + ListCell *lc; + AttrNumber i = 1; + + foreach (lc, translated_vars) + { + Var *var = (Var *) lfirst(lc); + + if (var && var->varattno != i) + return false; + + i++; + } + + 
return true; +} + + +/* + * ----------------------------------------------- + * Count number of times we've visited planner() + * ----------------------------------------------- + */ + +static int32 planner_calls = 0; + +void +incr_planner_calls_count(void) +{ + Assert(planner_calls < PG_INT32_MAX); + + planner_calls++; +} + +void +decr_planner_calls_count(void) +{ + Assert(planner_calls > 0); + + planner_calls--; +} + +int32 +get_planner_calls_count(void) +{ + return planner_calls; } diff --git a/src/rangeset.c b/src/rangeset.c index 15bb5849..9f7b2aa1 100644 --- a/src/rangeset.c +++ b/src/rangeset.c @@ -3,11 +3,12 @@ * rangeset.c * IndexRange functions * - * Copyright (c) 2015-2016, Postgres Professional + * Copyright (c) 2015-2020, Postgres Professional * * ------------------------------------------------------------------------ */ +#include "compat/pg_compat.h" #include "rangeset.h" @@ -238,25 +239,25 @@ irange_list_union(List *a, List *b) if (irange_lower(lfirst_irange(ca)) <= irange_lower(lfirst_irange(cb))) { next = lfirst_irange(ca); - ca = lnext(ca); /* move to next cell */ + ca = lnext_compat(a, ca); /* move to next cell */ } else { next = lfirst_irange(cb); - cb = lnext(cb); /* move to next cell */ + cb = lnext_compat(b, cb); /* move to next cell */ } } /* Fetch next irange from A */ else if (ca) { next = lfirst_irange(ca); - ca = lnext(ca); /* move to next cell */ + ca = lnext_compat(a, ca); /* move to next cell */ } /* Fetch next irange from B */ else if (cb) { next = lfirst_irange(cb); - cb = lnext(cb); /* move to next cell */ + cb = lnext_compat(b, cb); /* move to next cell */ } /* Put this irange to 'cur' if don't have it yet */ @@ -339,9 +340,9 @@ irange_list_intersection(List *a, List *b) * irange is greater (or equal) to upper bound of current. 
*/ if (irange_upper(ra) <= irange_upper(rb)) - ca = lnext(ca); + ca = lnext_compat(a, ca); if (irange_upper(ra) >= irange_upper(rb)) - cb = lnext(cb); + cb = lnext_compat(b, cb); } return result; } diff --git a/src/relation_info.c b/src/relation_info.c index 12965f16..2794a183 100644 --- a/src/relation_info.c +++ b/src/relation_info.c @@ -3,7 +3,7 @@ * relation_info.c * Data structures describing partitioned relations * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * * ------------------------------------------------------------------------ */ @@ -16,6 +16,10 @@ #include "xact_handling.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/genam.h" +#include "access/table.h" +#endif #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/indexing.h" @@ -24,8 +28,12 @@ #include "catalog/pg_type.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/clauses.h" #include "optimizer/var.h" +#endif #include "parser/analyze.h" #include "parser/parser.h" #include "storage/lmgr.h" @@ -33,7 +41,9 @@ #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" +#include "utils/inval.h" #include "utils/memutils.h" +#include "utils/resowner.h" #include "utils/ruleutils.h" #include "utils/syscache.h" #include "utils/lsyscache.h" @@ -42,8 +52,7 @@ #if PG_VERSION_NUM < 90600 #include "optimizer/planmain.h" #endif - -#if PG_VERSION_NUM >= 90600 +#if PG_VERSION_NUM < 110000 && PG_VERSION_NUM >= 90600 #include "catalog/pg_constraint_fn.h" #endif @@ -53,6 +62,54 @@ #define COOK_PART_EXPR_ERROR "failed to analyze partitioning expression \"%s\"" +#ifdef USE_RELINFO_LEAK_TRACKER +#undef get_pathman_relation_info +#undef close_pathman_relation_info + +const char *prel_resowner_function = NULL; +int prel_resowner_line = 0; + +#define LeakTrackerAdd(prel) \ + do { \ + MemoryContext 
leak_tracker_add_old_mcxt = MemoryContextSwitchTo((prel)->mcxt); \ + (prel)->owners = \ + list_append_unique( \ + (prel)->owners, \ + list_make2(makeString((char *) prel_resowner_function), \ + makeInteger(prel_resowner_line))); \ + MemoryContextSwitchTo(leak_tracker_add_old_mcxt); \ + \ + (prel)->access_total++; \ + } while (0) + +#define LeakTrackerPrint(prel) \ + do { \ + ListCell *leak_tracker_print_lc; \ + foreach (leak_tracker_print_lc, (prel)->owners) \ + { \ + char *fun = strVal(linitial(lfirst(leak_tracker_print_lc))); \ + int line = intVal(lsecond(lfirst(leak_tracker_print_lc))); \ + elog(WARNING, "PartRelationInfo referenced in %s:%d", fun, line); \ + } \ + } while (0) + +#define LeakTrackerFree(prel) \ + do { \ + ListCell *leak_tracker_free_lc; \ + foreach (leak_tracker_free_lc, (prel)->owners) \ + { \ + list_free_deep(lfirst(leak_tracker_free_lc)); \ + } \ + list_free((prel)->owners); \ + (prel)->owners = NIL; \ + } while (0) +#else +#define LeakTrackerAdd(prel) +#define LeakTrackerPrint(prel) +#define LeakTrackerFree(prel) +#endif + + /* Comparison function info */ typedef struct cmp_func_info { @@ -60,6 +117,13 @@ typedef struct cmp_func_info Oid collid; } cmp_func_info; +typedef struct prel_resowner_info +{ + ResourceOwner owner; + List *prels; +} prel_resowner_info; + + /* * For pg_pathman.enable_bounds_cache GUC. */ @@ -69,37 +133,31 @@ bool pg_pathman_enable_bounds_cache = true; /* * We delay all invalidation jobs received in relcache hook. 
*/ -static List *delayed_invalidation_parent_rels = NIL; -static List *delayed_invalidation_vague_rels = NIL; static bool delayed_shutdown = false; /* pathman was dropped */ +/* + * PartRelationInfo is controlled by ResourceOwner; + * resowner -> List of controlled PartRelationInfos by this ResourceOwner + */ +HTAB *prel_resowner = NULL; -/* Add unique Oid to list, allocate in TopPathmanContext */ -#define list_add_unique(list, oid) \ - do { \ - MemoryContext old_mcxt = MemoryContextSwitchTo(TopPathmanContext); \ - list = list_append_unique_oid(list, ObjectIdGetDatum(oid)); \ - MemoryContextSwitchTo(old_mcxt); \ - } while (0) - -#define free_invalidation_list(list) \ - do { \ - list_free(list); \ - list = NIL; \ - } while (0) /* Handy wrappers for Oids */ #define bsearch_oid(key, array, array_size) \ bsearch((const void *) &(key), (array), (array_size), sizeof(Oid), oid_cmp) -static bool try_invalidate_parent(Oid relid, Oid *parents, int parents_count); -static Oid try_syscache_parent_search(Oid partition, PartParentSearch *status); -static Oid get_parent_of_partition_internal(Oid partition, - PartParentSearch *status, - HASHACTION action); +static PartRelationInfo *build_pathman_relation_info(Oid relid, Datum *values); +static void free_pathman_relation_info(PartRelationInfo *prel); +static void invalidate_psin_entries_using_relid(Oid relid); +static void invalidate_psin_entry(PartStatusInfo *psin); -static Expr *get_partition_constraint_expr(Oid partition); +static PartRelationInfo *resowner_prel_add(PartRelationInfo *prel); +static PartRelationInfo *resowner_prel_del(PartRelationInfo *prel); +static void resonwner_prel_callback(ResourceReleasePhase phase, + bool isCommit, + bool isTopLevel, + void *arg); static void fill_prel_with_partitions(PartRelationInfo *prel, const Oid *partitions, @@ -111,6 +169,8 @@ static void fill_pbin_with_bounds(PartBoundInfo *pbin, static int cmp_range_entries(const void *p1, const void *p2, void *arg); +static void 
forget_bounds_of_partition(Oid partition); + static bool query_contains_subqueries(Node *node, void *context); @@ -129,316 +189,536 @@ init_relation_info_static_data(void) NULL); } + /* - * refresh\invalidate\get\remove PartRelationInfo functions. + * Status cache routines. */ -const PartRelationInfo * -refresh_pathman_relation_info(Oid relid, - Datum *values, - bool allow_incomplete) +/* Invalidate PartStatusInfo for 'relid' */ +void +forget_status_of_relation(Oid relid) { - const LOCKMODE lockmode = AccessShareLock; - const TypeCacheEntry *typcache; - Oid *prel_children; - uint32 prel_children_count = 0, - i; - PartRelationInfo *prel; - Datum param_values[Natts_pathman_config_params]; - bool param_isnull[Natts_pathman_config_params]; - char *expr; - MemoryContext old_mcxt; + PartStatusInfo *psin; + PartParentInfo *ppar; - AssertTemporaryContext(); - prel = invalidate_pathman_relation_info(relid, NULL); - Assert(prel); + /* Find status cache entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + if (psin) + invalidate_psin_entry(psin); + + /* + * Find parent of this relation. + * + * We don't want to use get_parent_of_partition() + * since it relies upon the syscache. 
+ */ + ppar = pathman_cache_search_relid(parents_cache, + relid, HASH_FIND, + NULL); - /* Try locking parent, exit fast if 'allow_incomplete' */ - if (allow_incomplete) + /* Invalidate parent directly */ + if (ppar) { - if (!ConditionalLockRelationOid(relid, lockmode)) - return NULL; /* leave an invalid entry */ + /* Find status cache entry for parent */ + psin = pathman_cache_search_relid(status_cache, + ppar->parent_relid, HASH_FIND, + NULL); + if (psin) + invalidate_psin_entry(psin); } - else LockRelationOid(relid, lockmode); + /* Otherwise, look through all entries */ + else invalidate_psin_entries_using_relid(relid); +} - /* Check if parent exists */ - if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) +/* Invalidate all PartStatusInfo entries */ +void +invalidate_status_cache(void) +{ + invalidate_psin_entries_using_relid(InvalidOid); +} + +/* Invalidate PartStatusInfo entry referencing 'relid' */ +static void +invalidate_psin_entries_using_relid(Oid relid) +{ + HASH_SEQ_STATUS status; + PartStatusInfo *psin; + + hash_seq_init(&status, status_cache); + + while ((psin = (PartStatusInfo *) hash_seq_search(&status)) != NULL) { - /* Nope, it doesn't, remove this entry and exit */ - UnlockRelationOid(relid, lockmode); - remove_pathman_relation_info(relid); - return NULL; /* exit */ + if (!OidIsValid(relid) || + psin->relid == relid || + (psin->prel && PrelHasPartition(psin->prel, relid))) + { + /* Perform invalidation */ + invalidate_psin_entry(psin); + + /* Exit if exact match */ + if (OidIsValid(relid)) + { + hash_seq_term(&status); + break; + } + } + } +} + +/* Invalidate single PartStatusInfo entry */ +static void +invalidate_psin_entry(PartStatusInfo *psin) +{ +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, "invalidation message for relation %u [%u]", + psin->relid, MyProcPid); +#endif + + if (psin->prel) + { + if (PrelReferenceCount(psin->prel) > 0) + { + /* Mark entry as outdated and detach it */ + PrelIsFresh(psin->prel) = false; + } + else + { + 
free_pathman_relation_info(psin->prel); + } } - /* Make both arrays point to NULL */ - prel->children = NULL; - prel->ranges = NULL; + (void) pathman_cache_search_relid(status_cache, + psin->relid, + HASH_REMOVE, + NULL); +} + - /* Set partitioning type */ - prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); +/* + * Dispatch cache routines. + */ - /* Fetch cooked partitioning expression */ - expr = TextDatumGetCString(values[Anum_pathman_config_cooked_expr - 1]); +/* Close PartRelationInfo entry */ +void +close_pathman_relation_info(PartRelationInfo *prel) +{ + Assert(prel); - /* Create a new memory context to store expression tree etc */ - prel->mcxt = AllocSetContextCreate(PathmanRelationCacheContext, - CppAsString(refresh_pathman_relation_info), - ALLOCSET_SMALL_SIZES); + (void) resowner_prel_del(prel); +} - /* Switch to persistent memory context */ - old_mcxt = MemoryContextSwitchTo(prel->mcxt); +/* Check if relation is partitioned by pg_pathman */ +bool +has_pathman_relation_info(Oid relid) +{ + PartRelationInfo *prel; - /* Build partitioning expression tree */ - prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); - prel->expr = (Node *) stringToNode(expr); - fix_opfuncids(prel->expr); + if ((prel = get_pathman_relation_info(relid)) != NULL) + { + close_pathman_relation_info(prel); - /* Extract Vars and varattnos of partitioning expression */ - prel->expr_vars = NIL; - prel->expr_atts = NULL; - prel->expr_vars = pull_var_clause_compat(prel->expr, 0, 0); - pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); + return true; + } - MemoryContextSwitchTo(old_mcxt); + return false; +} - /* First, fetch type of partitioning expression */ - prel->ev_type = exprType(prel->expr); - prel->ev_typmod = exprTypmod(prel->expr); - prel->ev_collid = exprCollation(prel->expr); +/* Get PartRelationInfo from local cache */ +PartRelationInfo * +get_pathman_relation_info(Oid relid) +{ + PartStatusInfo *psin; 
+ + if (!IsPathmanReady()) + elog(ERROR, "pg_pathman is disabled"); - /* Fetch HASH & CMP fuctions and other stuff from type cache */ - typcache = lookup_type_cache(prel->ev_type, - TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); + /* Should always be called in transaction */ + Assert(IsTransactionState()); - prel->ev_byval = typcache->typbyval; - prel->ev_len = typcache->typlen; - prel->ev_align = typcache->typalign; + /* We don't create entries for catalog */ + if (relid < FirstNormalObjectId) + return NULL; - prel->cmp_proc = typcache->cmp_proc; - prel->hash_proc = typcache->hash_proc; + /* Do we know anything about this relation? */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); - /* Try searching for children (don't wait if we can't lock) */ - switch (find_inheritance_children_array(relid, lockmode, - allow_incomplete, - &prel_children_count, - &prel_children)) + if (!psin) { - /* If there's no children at all, remove this entry */ - case FCS_NO_CHILDREN: - elog(DEBUG2, "refresh: relation %u has no children [%u]", - relid, MyProcPid); + PartRelationInfo *prel = NULL; + ItemPointerData iptr; + Datum values[Natts_pathman_config]; + bool isnull[Natts_pathman_config]; + bool found; + + /* + * Check if PATHMAN_CONFIG table contains this relation and + * build a partitioned table cache entry (might emit ERROR). 
+ */ + if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) + prel = build_pathman_relation_info(relid, values); - UnlockRelationOid(relid, lockmode); - remove_pathman_relation_info(relid); - return NULL; /* exit */ + /* Create a new entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_ENTER, + &found); + Assert(!found); /* it shouldn't just appear out of thin air */ - /* If can't lock children, leave an invalid entry */ - case FCS_COULD_NOT_LOCK: - elog(DEBUG2, "refresh: cannot lock children of relation %u [%u]", - relid, MyProcPid); + /* Cache fresh entry */ + psin->prel = prel; + } - UnlockRelationOid(relid, lockmode); - return NULL; /* exit */ + /* Check invariants */ + Assert(!psin->prel || PrelIsFresh(psin->prel)); - /* Found some children, just unlock parent */ - case FCS_FOUND: - elog(DEBUG2, "refresh: found children of relation %u [%u]", - relid, MyProcPid); +#ifdef USE_RELINFO_LOGGING + elog(DEBUG2, + "fetching %s record for parent %u [%u]", + (psin->prel ? "live" : "NULL"), relid, MyProcPid); +#endif - UnlockRelationOid(relid, lockmode); - break; /* continue */ + return resowner_prel_add(psin->prel); +} - /* Error: unknown result code */ - default: - elog(ERROR, "error in function " - CppAsString(find_inheritance_children_array)); +/* Build a new PartRelationInfo for partitioned relation */ +static PartRelationInfo * +build_pathman_relation_info(Oid relid, Datum *values) +{ + const LOCKMODE lockmode = AccessShareLock; + MemoryContext prel_mcxt; + PartRelationInfo *prel; + + AssertTemporaryContext(); + + /* Lock parent table */ + LockRelationOid(relid, lockmode); + + /* Check if parent exists */ + if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(relid))) + { + /* Nope, it doesn't, remove this entry and exit */ + UnlockRelationOid(relid, lockmode); + return NULL; /* exit */ } - /* - * Fill 'prel' with partition info, raise ERROR if anything is wrong. 
- * This way PartRelationInfo will remain 'invalid', and 'get' procedure - * will try to refresh it again (and again), until the error is fixed - * by user manually (i.e. invalid check constraints etc). - */ + /* Create a new memory context to store expression tree etc */ + prel_mcxt = AllocSetContextCreate(PathmanParentsCacheContext, + "build_pathman_relation_info", + ALLOCSET_SMALL_SIZES); + + /* Create a new PartRelationInfo */ + prel = MemoryContextAllocZero(prel_mcxt, sizeof(PartRelationInfo)); + prel->relid = relid; + prel->refcount = 0; + prel->fresh = true; + prel->mcxt = prel_mcxt; + + /* Memory leak and cache protection */ PG_TRY(); { + MemoryContext old_mcxt; + const TypeCacheEntry *typcache; + Datum param_values[Natts_pathman_config_params]; + bool param_isnull[Natts_pathman_config_params]; + Oid *prel_children; + uint32 prel_children_count = 0, + i; + + /* Make both arrays point to NULL */ + prel->children = NULL; + prel->ranges = NULL; + + /* Set partitioning type */ + prel->parttype = DatumGetPartType(values[Anum_pathman_config_parttype - 1]); + + /* Switch to persistent memory context */ + old_mcxt = MemoryContextSwitchTo(prel->mcxt); + + /* Build partitioning expression tree */ + prel->expr_cstr = TextDatumGetCString(values[Anum_pathman_config_expr - 1]); + prel->expr = cook_partitioning_expression(relid, prel->expr_cstr, NULL); + fix_opfuncids(prel->expr); + + /* Extract Vars and varattnos of partitioning expression */ + prel->expr_vars = NIL; + prel->expr_atts = NULL; + prel->expr_vars = pull_var_clause_compat(prel->expr, 0, 0); + pull_varattnos((Node *) prel->expr_vars, PART_EXPR_VARNO, &prel->expr_atts); + + MemoryContextSwitchTo(old_mcxt); + + /* First, fetch type of partitioning expression */ + prel->ev_type = exprType(prel->expr); + prel->ev_typmod = exprTypmod(prel->expr); + prel->ev_collid = exprCollation(prel->expr); + + /* Fetch HASH & CMP fuctions and other stuff from type cache */ + typcache = lookup_type_cache(prel->ev_type, + 
TYPECACHE_CMP_PROC | TYPECACHE_HASH_PROC); + + prel->ev_byval = typcache->typbyval; + prel->ev_len = typcache->typlen; + prel->ev_align = typcache->typalign; + + prel->cmp_proc = typcache->cmp_proc; + prel->hash_proc = typcache->hash_proc; + + /* Try searching for children */ + (void) find_inheritance_children_array(relid, lockmode, false, + &prel_children_count, + &prel_children); + + /* Fill 'prel' with partition info, raise ERROR if anything is wrong */ fill_prel_with_partitions(prel, prel_children, prel_children_count); + + /* Unlock the parent */ + UnlockRelationOid(relid, lockmode); + + /* Now it's time to take care of children */ + for (i = 0; i < prel_children_count; i++) + { + /* Cache this child */ + cache_parent_of_partition(prel_children[i], relid); + + /* Unlock this child */ + UnlockRelationOid(prel_children[i], lockmode); + } + + if (prel_children) + pfree(prel_children); + + /* Read additional parameters ('enable_parent' at the moment) */ + if (read_pathman_params(relid, param_values, param_isnull)) + { + prel->enable_parent = + param_values[Anum_pathman_config_params_enable_parent - 1]; + } + /* Else set default values if they cannot be found */ + else + { + prel->enable_parent = DEFAULT_PATHMAN_ENABLE_PARENT; + } } PG_CATCH(); { - /* Remove this parent from parents cache */ - ForgetParent(prel); + /* + * If we managed to create some children but failed later, bounds + * cache now might have obsolete data for something that probably is + * not a partitioned table at all. Remove it. + */ + if (!IsPathmanInitialized()) + /* + * ... unless failure was so hard that caches were already destoyed, + * i.e. 
extension disabled + */ + PG_RE_THROW(); - /* Delete unused 'prel_mcxt' */ - MemoryContextDelete(prel->mcxt); + if (prel->children != NULL) + { + uint32 i; - prel->children = NULL; - prel->ranges = NULL; - prel->mcxt = NULL; + for (i = 0; i < PrelChildrenCount(prel); i++) + { + Oid child; + + /* + * We rely on children and ranges array allocated with 0s, not + * random data + */ + if (prel->parttype == PT_HASH) + child = prel->children[i]; + else + { + Assert(prel->parttype == PT_RANGE); + child = prel->ranges[i].child_oid; + } + + forget_bounds_of_partition(child); + } + } + + /* Free this entry */ + free_pathman_relation_info(prel); /* Rethrow ERROR further */ PG_RE_THROW(); } PG_END_TRY(); - /* Peform some actions for each child */ - for (i = 0; i < prel_children_count; i++) - { - /* Add "partition+parent" pair to cache */ - cache_parent_of_partition(prel_children[i], relid); - - /* Now it's time to unlock this child */ - UnlockRelationOid(prel_children[i], lockmode); - } - - if (prel_children) - pfree(prel_children); - - /* Read additional parameters ('enable_parent' at the moment) */ - if (read_pathman_params(relid, param_values, param_isnull)) - { - prel->enable_parent = param_values[Anum_pathman_config_params_enable_parent - 1]; - } - /* Else set default values if they cannot be found */ - else + /* Free trivial entries */ + if (PrelChildrenCount(prel) == 0) { - prel->enable_parent = DEFAULT_PATHMAN_ENABLE_PARENT; + free_pathman_relation_info(prel); + prel = NULL; } - /* We've successfully built a cache entry */ - prel->valid = true; - return prel; } -/* Invalidate PartRelationInfo cache entry. Create new entry if 'found' is NULL. */ -PartRelationInfo * -invalidate_pathman_relation_info(Oid relid, bool *found) +/* Free PartRelationInfo struct safely */ +static void +free_pathman_relation_info(PartRelationInfo *prel) { - bool prel_found; - HASHACTION action = found ? 
HASH_FIND : HASH_ENTER; - PartRelationInfo *prel; - - prel = pathman_cache_search_relid(partitioned_rels, - relid, action, - &prel_found); + MemoryContextDelete(prel->mcxt); +} - /* Handle valid PartRelationInfo */ - if ((action == HASH_FIND || - (action == HASH_ENTER && prel_found)) && PrelIsValid(prel)) +static PartRelationInfo * +resowner_prel_add(PartRelationInfo *prel) +{ + if (!prel_resowner) { - /* Remove this parent from parents cache */ - ForgetParent(prel); + HASHCTL ctl; - /* Drop cached bounds etc */ - MemoryContextDelete(prel->mcxt); + memset(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(ResourceOwner); + ctl.entrysize = sizeof(prel_resowner_info); + ctl.hcxt = TopPathmanContext; + + prel_resowner = hash_create("prel resowner", + PART_RELS_SIZE, &ctl, + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); + + RegisterResourceReleaseCallback(resonwner_prel_callback, NULL); } - /* Set important default values */ if (prel) { - prel->children = NULL; - prel->ranges = NULL; - prel->mcxt = NULL; - - prel->valid = false; /* now cache entry is invalid */ - } + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; + bool found; + MemoryContext old_mcxt; + + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_ENTER, + &found); + + if (!found) + info->prels = NIL; + + /* Register this 'prel' */ + old_mcxt = MemoryContextSwitchTo(TopPathmanContext); + info->prels = lappend(info->prels, prel); + MemoryContextSwitchTo(old_mcxt); - /* Set 'found' if necessary */ - if (found) *found = prel_found; + /* Save current caller (function:line) */ + LeakTrackerAdd(prel); - elog(DEBUG2, - "Invalidating record for relation %u in pg_pathman's cache [%u]", - relid, MyProcPid); + /* Finally, increment refcount */ + PrelReferenceCount(prel) += 1; + } return prel; } -/* Get PartRelationInfo from local cache. 
*/ -const PartRelationInfo * -get_pathman_relation_info(Oid relid) +static PartRelationInfo * +resowner_prel_del(PartRelationInfo *prel) { - const PartRelationInfo *prel = pathman_cache_search_relid(partitioned_rels, - relid, HASH_FIND, - NULL); - /* Refresh PartRelationInfo if needed */ - if (prel && !PrelIsValid(prel)) + /* Must be active! */ + Assert(prel_resowner); + + if (prel) { - ItemPointerData iptr; - Datum values[Natts_pathman_config]; - bool isnull[Natts_pathman_config]; + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; - /* Check that PATHMAN_CONFIG table contains this relation */ - if (pathman_config_contains_relation(relid, values, isnull, NULL, &iptr)) + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_FIND, + NULL); + + if (info) { - bool upd_expr = isnull[Anum_pathman_config_cooked_expr - 1]; - if (upd_expr) - pathman_config_refresh_parsed_expression(relid, values, isnull, &iptr); + /* Check that 'prel' is registered! */ + Assert(list_member_ptr(info->prels, prel)); - /* Refresh partitioned table cache entry (might turn NULL) */ - prel = refresh_pathman_relation_info(relid, values, false); + /* Remove it from list */ + info->prels = list_delete_ptr(info->prels, prel); } - /* Else clear remaining cache entry */ - else + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); + + /* Decrease refcount */ + PrelReferenceCount(prel) -= 1; + + /* Free list of owners */ + if (PrelReferenceCount(prel) == 0) { - remove_pathman_relation_info(relid); - prel = NULL; /* don't forget to reset 'prel' */ + LeakTrackerFree(prel); } - } - - elog(DEBUG2, - "Fetching %s record for relation %u from pg_pathman's cache [%u]", - (prel ? 
"live" : "NULL"), relid, MyProcPid); - /* Make sure that 'prel' is valid */ - Assert(!prel || PrelIsValid(prel)); + /* Free this entry if it's time */ + if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) + { + free_pathman_relation_info(prel); + } + } return prel; } -/* Acquire lock on a table and try to get PartRelationInfo */ -const PartRelationInfo * -get_pathman_relation_info_after_lock(Oid relid, - bool unlock_if_not_found, - LockAcquireResult *lock_result) +static void +resonwner_prel_callback(ResourceReleasePhase phase, + bool isCommit, + bool isTopLevel, + void *arg) { - const PartRelationInfo *prel; - LockAcquireResult acquire_result; + ResourceOwner resowner = CurrentResourceOwner; + prel_resowner_info *info; - /* Restrict concurrent partition creation (it's dangerous) */ - acquire_result = xact_lock_rel(relid, ShareUpdateExclusiveLock, false); + if (prel_resowner) + { + ListCell *lc; - /* Invalidate cache entry (see AcceptInvalidationMessages()) */ - invalidate_pathman_relation_info(relid, NULL); + info = hash_search(prel_resowner, + (void *) &resowner, + HASH_FIND, + NULL); - /* Set 'lock_result' if asked to */ - if (lock_result) - *lock_result = acquire_result; + if (info) + { + foreach (lc, info->prels) + { + PartRelationInfo *prel = lfirst(lc); - prel = get_pathman_relation_info(relid); - if (!prel && unlock_if_not_found) - UnlockRelationOid(relid, ShareUpdateExclusiveLock); + if (isCommit) + { + /* Print verbose list of *possible* owners */ + LeakTrackerPrint(prel); - return prel; -} + elog(WARNING, + "cache reference leak: PartRelationInfo(%d) has count %d", + PrelParentRelid(prel), PrelReferenceCount(prel)); + } -/* Remove PartRelationInfo from local cache. 
*/ -void -remove_pathman_relation_info(Oid relid) -{ - bool found; + /* Check that refcount is valid */ + Assert(PrelReferenceCount(prel) > 0); - /* Free resources */ - invalidate_pathman_relation_info(relid, &found); + /* Decrease refcount */ + PrelReferenceCount(prel) -= 1; - /* Now let's remove the entry completely */ - if (found) - pathman_cache_search_relid(partitioned_rels, relid, HASH_REMOVE, NULL); + /* Free list of owners */ + LeakTrackerFree(prel); - elog(DEBUG2, - "Removing record for relation %u in pg_pathman's cache [%u]", - relid, MyProcPid); + /* Free this entry if it's time */ + if (PrelReferenceCount(prel) == 0 && !PrelIsFresh(prel)) + { + free_pathman_relation_info(prel); + } + } + + list_free(info->prels); + + hash_search(prel_resowner, + (void *) &resowner, + HASH_REMOVE, + NULL); + } + } } /* Fill PartRelationInfo with partition-related info */ @@ -471,7 +751,7 @@ fill_prel_with_partitions(PartRelationInfo *prel, /* Create temporary memory context for loop */ temp_mcxt = AllocSetContextCreate(CurrentMemoryContext, CppAsString(fill_prel_with_partitions), - ALLOCSET_DEFAULT_SIZES); + ALLOCSET_SMALL_SIZES); /* Initialize bounds of partitions */ for (i = 0; i < PrelChildrenCount(prel); i++) @@ -493,13 +773,31 @@ fill_prel_with_partitions(PartRelationInfo *prel, switch (prel->parttype) { case PT_HASH: - prel->children[pbin->part_idx] = pbin->child_rel; + /* + * This might be the case if hash part was dropped, and thus + * children array alloc'ed smaller than needed, but parts + * bound cache still keeps entries with high indexes. + */ + if (pbin->part_idx >= PrelChildrenCount(prel)) + { + /* purged caches will destoy prel, save oid for reporting */ + Oid parent_relid = PrelParentRelid(prel); + + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, (errmsg("pg_pathman's cache for relation %d " + "has not been properly initialized. 
" + "Looks like one of hash partitions was dropped.", + parent_relid), + errhint(INIT_ERROR_HINT))); + } + + prel->children[pbin->part_idx] = pbin->child_relid; break; case PT_RANGE: { /* Copy child's Oid */ - prel->ranges[i].child_oid = pbin->child_rel; + prel->ranges[i].child_oid = pbin->child_relid; /* Copy all min & max Datums to the persistent mcxt */ old_mcxt = MemoryContextSwitchTo(prel->mcxt); @@ -531,23 +829,15 @@ fill_prel_with_partitions(PartRelationInfo *prel, /* Finalize 'prel' for a RANGE-partitioned table */ if (prel->parttype == PT_RANGE) { - cmp_func_info cmp_info; - - /* Prepare function info */ - fmgr_info(prel->cmp_proc, &cmp_info.flinfo); - cmp_info.collid = prel->ev_collid; - - /* Sort partitions by RangeEntry->min asc */ - qsort_arg((void *) prel->ranges, PrelChildrenCount(prel), - sizeof(RangeEntry), cmp_range_entries, - (void *) &cmp_info); + qsort_range_entries(PrelGetRangesArray(prel), + PrelChildrenCount(prel), + prel); /* Initialize 'prel->children' array */ for (i = 0; i < PrelChildrenCount(prel); i++) prel->children[i] = prel->ranges[i].child_oid; } -#ifdef USE_ASSERT_CHECKING /* Check that each partition Oid has been assigned properly */ if (prel->parttype == PT_HASH) for (i = 0; i < PrelChildrenCount(prel); i++) @@ -555,15 +845,15 @@ fill_prel_with_partitions(PartRelationInfo *prel, if (!OidIsValid(prel->children[i])) { DisablePathman(); /* disable pg_pathman since config is broken */ - elog(ERROR, "pg_pathman's cache for relation \"%s\" " - "has not been properly initialized", - get_rel_name_or_relid(PrelParentRelid(prel))); + ereport(ERROR, (errmsg("pg_pathman's cache for relation \"%s\" " + "has not been properly initialized", + get_rel_name_or_relid(PrelParentRelid(prel))), + errhint(INIT_ERROR_HINT))); } } -#endif } -/* qsort comparison function for RangeEntries */ +/* qsort() comparison function for RangeEntries */ static int cmp_range_entries(const void *p1, const void *p2, void *arg) { @@ -574,551 +864,512 @@ 
cmp_range_entries(const void *p1, const void *p2, void *arg) return cmp_bounds(&info->flinfo, info->collid, &v1->min, &v2->min); } +void +qsort_range_entries(RangeEntry *entries, int nentries, + const PartRelationInfo *prel) +{ + cmp_func_info cmp_info; + + /* Prepare function info */ + fmgr_info(prel->cmp_proc, &cmp_info.flinfo); + cmp_info.collid = prel->ev_collid; + + /* Sort partitions by RangeEntry->min asc */ + qsort_arg(entries, nentries, + sizeof(RangeEntry), + cmp_range_entries, + (void *) &cmp_info); +} /* - * Partitioning expression routines. + * Common PartRelationInfo checks. Emit ERROR if anything is wrong. */ - -/* Wraps expression in SELECT query and returns parse tree */ -Node * -parse_partitioning_expression(const Oid relid, - const char *expr_cstr, - char **query_string_out, /* ret value #1 */ - Node **parsetree_out) /* ret value #2 */ +void +shout_if_prel_is_invalid(const Oid parent_oid, + const PartRelationInfo *prel, + const PartType expected_part_type) { - SelectStmt *select_stmt; - List *parsetree_list; - MemoryContext old_mcxt; - - const char *sql = "SELECT (%s) FROM ONLY %s.%s"; - char *relname = get_rel_name(relid), - *nspname = get_namespace_name(get_rel_namespace(relid)); - char *query_string = psprintf(sql, expr_cstr, - quote_identifier(nspname), - quote_identifier(relname)); + if (!prel) + elog(ERROR, "relation \"%s\" has no partitions", + get_rel_name_or_relid(parent_oid)); - old_mcxt = CurrentMemoryContext; - - PG_TRY(); - { - parsetree_list = raw_parser(query_string); - } - PG_CATCH(); + /* Check partitioning type unless it's "ANY" */ + if (expected_part_type != PT_ANY && + expected_part_type != prel->parttype) { - ErrorData *error; + char *expected_str; - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - error = CopyErrorData(); - FlushErrorState(); + switch (expected_part_type) + { + case PT_HASH: + expected_str = "HASH"; + break; - /* Adjust error message */ - error->detail = error->message; 
- error->message = psprintf(PARSE_PART_EXPR_ERROR, expr_cstr); - error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; - error->cursorpos = 0; - error->internalpos = 0; + case PT_RANGE: + expected_str = "RANGE"; + break; - ReThrowError(error); + default: + WrongPartType(expected_part_type); + expected_str = NULL; /* keep compiler happy */ + } + + elog(ERROR, "relation \"%s\" is not partitioned by %s", + get_rel_name_or_relid(parent_oid), + expected_str); } - PG_END_TRY(); +} - if (list_length(parsetree_list) != 1) - elog(ERROR, "expression \"%s\" produced more than one query", expr_cstr); +/* + * Remap partitioning expression columns for tuple source relation. + * This is a simplified version of functions that return TupleConversionMap. + * It should be faster if expression uses a few fields of relation. + */ +#if PG_VERSION_NUM >= 130000 +AttrMap * +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc) +#else +AttrNumber * +PrelExpressionAttributesMap(const PartRelationInfo *prel, + TupleDesc source_tupdesc, + int *map_length) +#endif +{ + Oid parent_relid = PrelParentRelid(prel); + int source_natts = source_tupdesc->natts, + expr_natts = 0; +#if PG_VERSION_NUM >= 130000 + AttrMap *result; +#else + AttrNumber *result; +#endif + AttrNumber i; + bool is_trivial = true; -#if PG_VERSION_NUM >= 100000 - select_stmt = (SelectStmt *) ((RawStmt *) linitial(parsetree_list))->stmt; + /* Get largest attribute number used in expression */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + expr_natts = i; + +#if PG_VERSION_NUM >= 130000 + result = make_attrmap(expr_natts); #else - select_stmt = (SelectStmt *) linitial(parsetree_list); + /* Allocate array for map */ + result = (AttrNumber *) palloc0(expr_natts * sizeof(AttrNumber)); #endif - if (query_string_out) - *query_string_out = query_string; + /* Find a match for each attribute */ + i = -1; + while ((i = bms_next_member(prel->expr_atts, i)) >= 0) + { + AttrNumber attnum = 
i + FirstLowInvalidHeapAttributeNumber; + char *attname = get_attname_compat(parent_relid, attnum); + int j; - if (parsetree_out) - *parsetree_out = (Node *) linitial(parsetree_list); + Assert(attnum <= expr_natts); - return ((ResTarget *) linitial(select_stmt->targetList))->val; -} + for (j = 0; j < source_natts; j++) + { + Form_pg_attribute att = TupleDescAttr(source_tupdesc, j); -/* Parse partitioning expression and return its type and nodeToString() as TEXT */ -Datum -cook_partitioning_expression(const Oid relid, - const char *expr_cstr, - Oid *expr_type_out) /* ret value #1 */ -{ - Node *parse_tree; - List *query_tree_list; + if (att->attisdropped) + continue; /* attrMap[attnum - 1] is already 0 */ - char *query_string, - *expr_serialized = ""; /* keep compiler happy */ + if (strcmp(NameStr(att->attname), attname) == 0) + { +#if PG_VERSION_NUM >= 130000 + result->attnums[attnum - 1] = (AttrNumber) (j + 1); +#else + result[attnum - 1] = (AttrNumber) (j + 1); +#endif + break; + } + } - Datum expr_datum; +#if PG_VERSION_NUM >= 130000 + if (result->attnums[attnum - 1] == 0) +#else + if (result[attnum - 1] == 0) +#endif + elog(ERROR, "cannot find column \"%s\" in child relation", attname); - MemoryContext parse_mcxt, - old_mcxt; +#if PG_VERSION_NUM >= 130000 + if (result->attnums[attnum - 1] != attnum) +#else + if (result[attnum - 1] != attnum) +#endif + is_trivial = false; + } - AssertTemporaryContext(); + /* Check if map is trivial */ + if (is_trivial) + { +#if PG_VERSION_NUM >= 130000 + free_attrmap(result); +#else + pfree(result); +#endif + return NULL; + } - /* - * We use separate memory context here, just to make sure we won't - * leave anything behind after parsing, rewriting and planning. 
- */ - parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, - CppAsString(cook_partitioning_expression), - ALLOCSET_DEFAULT_SIZES); +#if PG_VERSION_NUM < 130000 + *map_length = expr_natts; +#endif + return result; +} - /* Switch to mcxt for cooking :) */ - old_mcxt = MemoryContextSwitchTo(parse_mcxt); - /* First we have to build a raw AST */ - (void) parse_partitioning_expression(relid, expr_cstr, - &query_string, &parse_tree); +/* + * Bounds cache routines. + */ - /* We don't need pg_pathman's magic here */ - pathman_hooks_enabled = false; +/* Remove partition's constraint from cache */ +static void +forget_bounds_of_partition(Oid partition) +{ + PartBoundInfo *pbin; - PG_TRY(); - { - Query *query; - Node *expr; - int expr_attr; - Relids expr_varnos; - Bitmapset *expr_varattnos = NULL; + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? + pathman_cache_search_relid(bounds_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ - /* This will fail with ERROR in case of wrong expression */ - query_tree_list = pg_analyze_and_rewrite_compat(parse_tree, query_string, - NULL, 0, NULL); + if (pbin) + { + /* Free this entry */ + FreePartBoundInfo(pbin); - /* Sanity check #1 */ - if (list_length(query_tree_list) != 1) - elog(ERROR, "partitioning expression produced more than 1 query"); + /* Finally remove this entry from cache */ + pathman_cache_search_relid(bounds_cache, + partition, + HASH_REMOVE, + NULL); + } - query = (Query *) linitial(query_tree_list); +} - /* Sanity check #2 */ - if (list_length(query->targetList) != 1) - elog(ERROR, "there should be exactly 1 partitioning expression"); +/* + * Remove rel's constraint from cache, if relid is partition; + * Remove all children constraints, if it is parent. 
+ */ +void +forget_bounds_of_rel(Oid relid) +{ + PartStatusInfo *psin; - /* Sanity check #3 */ - if (query_tree_walker(query, query_contains_subqueries, NULL, 0)) - elog(ERROR, "subqueries are not allowed in partitioning expression"); + forget_bounds_of_partition(relid); - expr = (Node *) ((TargetEntry *) linitial(query->targetList))->expr; - expr = eval_const_expressions(NULL, expr); + /* + * If it was the parent who got invalidated, purge children's bounds. + * We assume here that if bounds_cache has something, parent must be also + * in status_cache. Fragile, but seems better then blowing out full bounds + * cache or digging pathman_config on each relcache invalidation. + */ - /* Sanity check #4 */ - if (contain_mutable_functions(expr)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("functions in partitioning expression" - " must be marked IMMUTABLE"))); + /* Find status cache entry for this relation */ + psin = pathman_cache_search_relid(status_cache, + relid, HASH_FIND, + NULL); + if (psin != NULL && psin->prel != NULL) + { + uint32 i; + PartRelationInfo *prel = psin->prel; + Oid *children = PrelGetChildrenArray(prel); - /* Sanity check #5 */ - expr_varnos = pull_varnos(expr); - if (bms_num_members(expr_varnos) != 1 || - relid != ((RangeTblEntry *) linitial(query->rtable))->relid) + for (i = 0; i < PrelChildrenCount(prel); i++) { - elog(ERROR, "partitioning expression should reference table \"%s\"", - get_rel_name(relid)); + forget_bounds_of_partition(children[i]); } + } +} - /* Sanity check #6 */ - pull_varattnos(expr, bms_singleton_member(expr_varnos), &expr_varattnos); - expr_attr = -1; - while ((expr_attr = bms_next_member(expr_varattnos, expr_attr)) >= 0) - { - AttrNumber attnum = expr_attr + FirstLowInvalidHeapAttributeNumber; - HeapTuple htup; - - /* Check that there's no system attributes in expression */ - if (attnum < InvalidAttrNumber) - ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("system attributes 
are not supported"))); - - htup = SearchSysCache2(ATTNUM, - ObjectIdGetDatum(relid), - Int16GetDatum(attnum)); - if (HeapTupleIsValid(htup)) - { - bool nullable; +/* Return partition's constraint as expression tree */ +PartBoundInfo * +get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) +{ + PartBoundInfo *pbin; - /* Fetch 'nullable' and free syscache tuple */ - nullable = !((Form_pg_attribute) GETSTRUCT(htup))->attnotnull; - ReleaseSysCache(htup); + /* + * We might end up building the constraint + * tree that we wouldn't want to keep. + */ + AssertTemporaryContext(); - if (nullable) - ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), - errmsg("column \"%s\" should be marked NOT NULL", - get_attname(relid, attnum)))); - } - } + /* PartRelationInfo must be provided */ + Assert(prel != NULL); - /* Free sets */ - bms_free(expr_varnos); - bms_free(expr_varattnos); + /* Should always be called in transaction */ + Assert(IsTransactionState()); - Assert(expr); - expr_serialized = nodeToString(expr); + /* Should we search in bounds cache? */ + pbin = pg_pathman_enable_bounds_cache ? 
+ pathman_cache_search_relid(bounds_cache, + partition, + HASH_FIND, + NULL) : + NULL; /* don't even bother */ - /* Set 'expr_type_out' if needed */ - if (expr_type_out) - *expr_type_out = exprType(expr); - } - PG_CATCH(); + /* Build new entry */ + if (!pbin) { - ErrorData *error; + PartBoundInfo pbin_local; + Expr *con_expr; - /* Don't forget to enable pg_pathman's hooks */ - pathman_hooks_enabled = true; + /* Initialize other fields */ + pbin_local.child_relid = partition; + pbin_local.byval = prel->ev_byval; - /* Switch to the original context & copy edata */ - MemoryContextSwitchTo(old_mcxt); - error = CopyErrorData(); - FlushErrorState(); + /* Try to build constraint's expression tree (may emit ERROR) */ + con_expr = get_partition_constraint_expr(partition, true); - /* Adjust error message */ - error->detail = error->message; - error->message = psprintf(COOK_PART_EXPR_ERROR, expr_cstr); - error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; - error->cursorpos = 0; - error->internalpos = 0; + /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ + fill_pbin_with_bounds(&pbin_local, prel, con_expr); - ReThrowError(error); + /* We strive to delay the creation of cache's entry */ + pbin = pg_pathman_enable_bounds_cache ? 
+ pathman_cache_search_relid(bounds_cache, + partition, + HASH_ENTER, + NULL) : + palloc(sizeof(PartBoundInfo)); + + /* Copy data from 'pbin_local' */ + memcpy(pbin, &pbin_local, sizeof(PartBoundInfo)); } - PG_END_TRY(); - /* Don't forget to enable pg_pathman's hooks */ - pathman_hooks_enabled = true; + return pbin; +} - /* Switch to previous mcxt */ - MemoryContextSwitchTo(old_mcxt); +void +invalidate_bounds_cache(void) +{ + HASH_SEQ_STATUS status; + PartBoundInfo *pbin; - /* Get Datum of serialized expression (right mcxt) */ - expr_datum = CStringGetTextDatum(expr_serialized); + Assert(offsetof(PartBoundInfo, child_relid) == 0); - /* Free memory */ - MemoryContextDelete(parse_mcxt); - - return expr_datum; -} - -/* Canonicalize user's expression (trim whitespaces etc) */ -char * -canonicalize_partitioning_expression(const Oid relid, - const char *expr_cstr) -{ - Node *parse_tree; - Expr *expr; - char *query_string; - Query *query; - - AssertTemporaryContext(); - - /* First we have to build a raw AST */ - (void) parse_partitioning_expression(relid, expr_cstr, - &query_string, &parse_tree); - - query = parse_analyze_compat(parse_tree, query_string, NULL, 0, NULL); - expr = ((TargetEntry *) linitial(query->targetList))->expr; - - /* We don't care about memory efficiency here */ - return deparse_expression((Node *) expr, - deparse_context_for(get_rel_name(relid), relid), - false, false); -} - -/* Check if query has subqueries */ -static bool -query_contains_subqueries(Node *node, void *context) -{ - if (node == NULL) - return false; + hash_seq_init(&status, bounds_cache); - /* We've met a subquery */ - if (IsA(node, Query)) - return true; + while ((pbin = hash_seq_search(&status)) != NULL) + { + FreePartBoundInfo(pbin); - return expression_tree_walker(node, query_contains_subqueries, NULL); + pathman_cache_search_relid(bounds_cache, + pbin->child_relid, + HASH_REMOVE, NULL); + } } - /* - * Functions for delayed invalidation. 
+ * Get constraint expression tree of a partition. + * + * build_check_constraint_name_relid_internal() is used to build conname. */ - -/* Add new delayed pathman shutdown job (DROP EXTENSION) */ -void -delay_pathman_shutdown(void) -{ - delayed_shutdown = true; -} - -/* Add new delayed invalidation job for a [ex-]parent relation */ -void -delay_invalidation_parent_rel(Oid parent) +Expr * +get_partition_constraint_expr(Oid partition, bool raise_error) { - list_add_unique(delayed_invalidation_parent_rels, parent); -} + Oid conid; /* constraint Oid */ + char *conname; /* constraint name */ + HeapTuple con_tuple; + Datum conbin_datum; + bool conbin_isnull; + Expr *expr; /* expression tree for constraint */ -/* Add new delayed invalidation job for a vague relation */ -void -delay_invalidation_vague_rel(Oid vague_rel) -{ - list_add_unique(delayed_invalidation_vague_rels, vague_rel); -} + conname = build_check_constraint_name_relid_internal(partition); + conid = get_relation_constraint_oid(partition, conname, true); -/* Finish all pending invalidation jobs if possible */ -void -finish_delayed_invalidation(void) -{ - /* Exit early if there's nothing to do */ - if (delayed_invalidation_parent_rels == NIL && - delayed_invalidation_vague_rels == NIL && - delayed_shutdown == false) + if (!OidIsValid(conid)) { - return; + if (!raise_error) + return NULL; + + ereport(ERROR, + (errmsg("constraint \"%s\" of partition \"%s\" does not exist", + conname, get_rel_name_or_relid(partition)))); } - /* Check that current state is transactional */ - if (IsTransactionState()) + con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); + if (!HeapTupleIsValid(con_tuple)) { - Oid *parents = NULL; - int parents_count; - bool parents_fetched = false; - ListCell *lc; + if (!raise_error) + return NULL; - /* Handle the probable 'DROP EXTENSION' case */ - if (delayed_shutdown) - { - Oid cur_pathman_config_relid; + ereport(ERROR, + (errmsg("cache lookup failed for constraint \"%s\" of 
partition \"%s\"", + conname, get_rel_name_or_relid(partition)))); + } - /* Unset 'shutdown' flag */ - delayed_shutdown = false; + conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, + Anum_pg_constraint_conbin, + &conbin_isnull); + if (conbin_isnull) + { + if (!raise_error) + return NULL; - /* Get current PATHMAN_CONFIG relid */ - cur_pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, - get_pathman_schema()); + ereport(ERROR, + (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", + conname, get_rel_name_or_relid(partition)))); + } + pfree(conname); - /* Check that PATHMAN_CONFIG table has indeed been dropped */ - if (cur_pathman_config_relid == InvalidOid || - cur_pathman_config_relid != get_pathman_config_relid(true)) - { - /* Ok, let's unload pg_pathman's config */ - unload_config(); + /* Finally we get a constraint expression tree */ + expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); - /* Disregard all remaining invalidation jobs */ - free_invalidation_list(delayed_invalidation_parent_rels); - free_invalidation_list(delayed_invalidation_vague_rels); + /* Don't foreget to release syscache tuple */ + ReleaseSysCache(con_tuple); - /* No need to continue, exit */ - return; - } - } + return expr; +} - /* Process relations that are (or were) definitely partitioned */ - foreach (lc, delayed_invalidation_parent_rels) - { - Oid parent = lfirst_oid(lc); +/* Fill PartBoundInfo with bounds/hash */ +static void +fill_pbin_with_bounds(PartBoundInfo *pbin, + const PartRelationInfo *prel, + const Expr *constraint_expr) +{ + AssertTemporaryContext(); - /* Skip if it's a TOAST table */ - if (IsToastNamespace(get_rel_namespace(parent))) - continue; + /* Copy partitioning type to 'pbin' */ + pbin->parttype = prel->parttype; - /* Fetch all partitioned tables */ - if (!parents_fetched) + /* Perform a partitioning_type-dependent task */ + switch (prel->parttype) + { + case PT_HASH: { - parents = read_parent_oids(&parents_count); - parents_fetched = 
true; + if (!validate_hash_constraint(constraint_expr, + prel, &pbin->part_idx)) + { + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("wrong constraint format for HASH partition \"%s\"", + get_rel_name_or_relid(pbin->child_relid)), + errhint(INIT_ERROR_HINT))); + } } + break; - /* Check if parent still exists */ - if (bsearch_oid(parent, parents, parents_count)) - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(parent, NULL); - else - remove_pathman_relation_info(parent); - } - - /* Process all other vague cases */ - foreach (lc, delayed_invalidation_vague_rels) - { - Oid vague_rel = lfirst_oid(lc); + case PT_RANGE: + { + Datum lower, upper; + bool lower_null, upper_null; - /* Skip if it's a TOAST table */ - if (IsToastNamespace(get_rel_namespace(vague_rel))) - continue; + if (validate_range_constraint(constraint_expr, + prel, &lower, &upper, + &lower_null, &upper_null)) + { + MemoryContext old_mcxt; - /* Fetch all partitioned tables */ - if (!parents_fetched) - { - parents = read_parent_oids(&parents_count); - parents_fetched = true; - } + /* Switch to the persistent memory context */ + old_mcxt = MemoryContextSwitchTo(PathmanBoundsCacheContext); - /* It might be a partitioned table or a partition */ - if (!try_invalidate_parent(vague_rel, parents, parents_count)) - { - PartParentSearch search; - Oid parent; - List *fresh_rels = delayed_invalidation_parent_rels; + pbin->range_min = lower_null ? + MakeBoundInf(MINUS_INFINITY) : + MakeBound(datumCopy(lower, + prel->ev_byval, + prel->ev_len)); - parent = get_parent_of_partition(vague_rel, &search); + pbin->range_max = upper_null ? 
+ MakeBoundInf(PLUS_INFINITY) : + MakeBound(datumCopy(upper, + prel->ev_byval, + prel->ev_len)); - switch (search) + /* Switch back */ + MemoryContextSwitchTo(old_mcxt); + } + else { - /* - * Two main cases: - * - It's *still* parent (in PATHMAN_CONFIG) - * - It *might have been* parent before (not in PATHMAN_CONFIG) - */ - case PPS_ENTRY_PART_PARENT: - case PPS_ENTRY_PARENT: - { - /* Skip if we've already refreshed this parent */ - if (!list_member_oid(fresh_rels, parent)) - try_invalidate_parent(parent, parents, parents_count); - } - break; - - /* How come we still don't know?? */ - case PPS_NOT_SURE: - elog(ERROR, "Unknown table status, this should never happen"); - break; - - default: - break; + DisablePathman(); /* disable pg_pathman since config is broken */ + ereport(ERROR, + (errmsg("wrong constraint format for RANGE partition \"%s\"", + get_rel_name_or_relid(pbin->child_relid)), + errhint(INIT_ERROR_HINT))); } } - } - - free_invalidation_list(delayed_invalidation_parent_rels); - free_invalidation_list(delayed_invalidation_vague_rels); + break; - if (parents) - pfree(parents); + default: + { + DisablePathman(); /* disable pg_pathman since config is broken */ + WrongPartType(prel->parttype); + } + break; } } /* - * cache\forget\get PartParentInfo functions. + * Parents cache routines. */ -/* Create "partition+parent" pair in local cache */ +/* Add parent of partition to cache */ void cache_parent_of_partition(Oid partition, Oid parent) { - bool found; PartParentInfo *ppar; - ppar = pathman_cache_search_relid(parent_cache, + /* Why would we want to call it not in transaction? */ + Assert(IsTransactionState()); + + /* Create a new cache entry */ + ppar = pathman_cache_search_relid(parents_cache, partition, HASH_ENTER, - &found); - elog(DEBUG2, - found ? 
- "Refreshing record for child %u in pg_pathman's cache [%u]" : - "Creating new record for child %u in pg_pathman's cache [%u]", - partition, MyProcPid); + NULL); - ppar->child_rel = partition; - ppar->parent_rel = parent; + /* Fill entry with parent */ + ppar->parent_relid = parent; } -/* Remove "partition+parent" pair from cache & return parent's Oid */ -Oid -forget_parent_of_partition(Oid partition, PartParentSearch *status) +/* Remove parent of partition from cache */ +void +forget_parent_of_partition(Oid partition) { - return get_parent_of_partition_internal(partition, status, HASH_REMOVE); + pathman_cache_search_relid(parents_cache, + partition, + HASH_REMOVE, + NULL); } -/* Return partition parent's Oid */ +/* Return parent of partition */ Oid -get_parent_of_partition(Oid partition, PartParentSearch *status) -{ - return get_parent_of_partition_internal(partition, status, HASH_FIND); -} - -/* - * Get [and remove] "partition+parent" pair from cache, - * also check syscache if 'status' is provided. - * - * "status == NULL" implies that we don't care about - * neither syscache nor PATHMAN_CONFIG table contents. - */ -static Oid -get_parent_of_partition_internal(Oid partition, - PartParentSearch *status, - HASHACTION action) +get_parent_of_partition(Oid partition) { - const char *action_str; /* "Fetching"\"Resetting" */ - Oid parent; - PartParentInfo *ppar = pathman_cache_search_relid(parent_cache, - partition, - HASH_FIND, - NULL); - /* Set 'action_str' */ - switch (action) - { - case HASH_REMOVE: - action_str = "Resetting"; - break; + PartParentInfo *ppar; - case HASH_FIND: - action_str = "Fetching"; - break; + /* Should always be called in transaction */ + Assert(IsTransactionState()); - default: - elog(ERROR, "Unexpected HTAB action %u", action); - } + /* We don't cache catalog objects */ + if (partition < FirstNormalObjectId) + return InvalidOid; - elog(DEBUG2, - "%s %s record for child %u from pg_pathman's cache [%u]", - action_str, (ppar ? 
"live" : "NULL"), partition, MyProcPid); + ppar = pathman_cache_search_relid(parents_cache, + partition, + HASH_FIND, + NULL); + /* Nice, we have a cached entry */ if (ppar) { - if (status) *status = PPS_ENTRY_PART_PARENT; - parent = ppar->parent_rel; - - /* Remove entry if necessary */ - if (action == HASH_REMOVE) - pathman_cache_search_relid(parent_cache, partition, - HASH_REMOVE, NULL); - } - /* Try fetching parent from syscache if 'status' is provided */ - else if (status) - parent = try_syscache_parent_search(partition, status); - else - parent = InvalidOid; /* we don't have to set status */ - - return parent; -} - -/* Try to find parent of a partition using syscache & PATHMAN_CONFIG */ -static Oid -try_syscache_parent_search(Oid partition, PartParentSearch *status) -{ - if (!IsTransactionState()) - { - /* We could not perform search */ - if (status) *status = PPS_NOT_SURE; - - return InvalidOid; + return ppar->parent_relid; } + /* Bad luck, let's search in catalog */ else { Relation relation; ScanKeyData key[1]; SysScanDesc scan; - HeapTuple inheritsTuple; + HeapTuple htup; Oid parent = InvalidOid; - /* At first we assume parent does not exist (not a partition) */ - if (status) *status = PPS_ENTRY_NOT_FOUND; - - relation = heap_open(InheritsRelationId, AccessShareLock); + relation = heap_open_compat(InheritsRelationId, AccessShareLock); ScanKeyInit(&key[0], Anum_pg_inherits_inhrelid, @@ -1128,315 +1379,359 @@ try_syscache_parent_search(Oid partition, PartParentSearch *status) scan = systable_beginscan(relation, InheritsRelidSeqnoIndexId, true, NULL, 1, key); - while ((inheritsTuple = systable_getnext(scan)) != NULL) + while ((htup = systable_getnext(scan)) != NULL) { - parent = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhparent; - - /* - * NB: don't forget that 'inh' flag does not immediately - * mean that this is a pg_pathman's partition. It might - * be just a casual inheriting table. 
- */ - if (status) *status = PPS_ENTRY_PARENT; + /* Extract parent from catalog tuple */ + Oid inhparent = ((Form_pg_inherits) GETSTRUCT(htup))->inhparent; /* Check that PATHMAN_CONFIG contains this table */ - if (pathman_config_contains_relation(parent, NULL, NULL, NULL, NULL)) + if (pathman_config_contains_relation(inhparent, NULL, NULL, NULL, NULL)) { - /* We've found the entry, update status */ - if (status) *status = PPS_ENTRY_PART_PARENT; + /* We should return this parent */ + parent = inhparent; + + /* Now, let's cache this parent */ + cache_parent_of_partition(partition, parent); } break; /* there should be no more rows */ } systable_endscan(scan); - heap_close(relation, AccessShareLock); + heap_close_compat(relation, AccessShareLock); return parent; } } -/* Try to invalidate cache entry for relation 'parent' */ -static bool -try_invalidate_parent(Oid relid, Oid *parents, int parents_count) +void +invalidate_parents_cache(void) { - /* Check if this is a partitioned table */ - if (bsearch_oid(relid, parents, parents_count)) - { - /* get_pathman_relation_info() will refresh this entry */ - invalidate_pathman_relation_info(relid, NULL); + HASH_SEQ_STATUS status; + PartParentInfo *ppar; - /* Success */ - return true; - } + Assert(offsetof(PartParentInfo, child_relid) == 0); - /* Clear remaining cache entry */ - remove_pathman_relation_info(relid); + hash_seq_init(&status, parents_cache); - /* Not a partitioned relation */ - return false; + while ((ppar = hash_seq_search(&status)) != NULL) + { + /* This is a plain structure, no need to pfree() */ + + pathman_cache_search_relid(parents_cache, + ppar->child_relid, + HASH_REMOVE, NULL); + } } /* - * forget\get constraint functions. + * Partitioning expression routines. 
*/ -/* Remove partition's constraint from cache */ -void -forget_bounds_of_partition(Oid partition) +/* Wraps expression in SELECT query and returns parse tree */ +Node * +parse_partitioning_expression(const Oid relid, + const char *expr_cstr, + char **query_string_out, /* ret value #1 */ + Node **parsetree_out) /* ret value #2 */ { - PartBoundInfo *pbin; + SelectStmt *select_stmt; + List *parsetree_list; + MemoryContext old_mcxt; - /* Should we search in bounds cache? */ - pbin = pg_pathman_enable_bounds_cache ? - pathman_cache_search_relid(bound_cache, - partition, - HASH_FIND, - NULL) : - NULL; /* don't even bother */ + const char *sql = "SELECT (%s) FROM ONLY %s.%s"; + char *relname = get_rel_name(relid), + *nspname = get_namespace_name(get_rel_namespace(relid)); + char *query_string = psprintf(sql, expr_cstr, + quote_identifier(nspname), + quote_identifier(relname)); - /* Free this entry */ - if (pbin) - { - /* Call pfree() if it's RANGE bounds */ - if (pbin->parttype == PT_RANGE) - { - FreeBound(&pbin->range_min, pbin->byval); - FreeBound(&pbin->range_max, pbin->byval); - } + old_mcxt = CurrentMemoryContext; - /* Finally remove this entry from cache */ - pathman_cache_search_relid(bound_cache, - partition, - HASH_REMOVE, - NULL); + PG_TRY(); + { + parsetree_list = raw_parser_compat(query_string); } + PG_CATCH(); + { + ErrorData *error; + + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); + + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(PARSE_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; + + ReThrowError(error); + } + PG_END_TRY(); + + if (list_length(parsetree_list) != 1) + elog(ERROR, "expression \"%s\" produced more than one query", expr_cstr); + +#if PG_VERSION_NUM >= 100000 + select_stmt = (SelectStmt *) ((RawStmt *) 
linitial(parsetree_list))->stmt; +#else + select_stmt = (SelectStmt *) linitial(parsetree_list); +#endif + + if (query_string_out) + *query_string_out = query_string; + + if (parsetree_out) + *parsetree_out = (Node *) linitial(parsetree_list); + + return ((ResTarget *) linitial(select_stmt->targetList))->val; } -/* Return partition's constraint as expression tree */ -PartBoundInfo * -get_bounds_of_partition(Oid partition, const PartRelationInfo *prel) +/* Parse partitioning expression and return its type and nodeToString() + * (or nodeToStringWithLocations() in version 17 and higher) as TEXT */ +Node * +cook_partitioning_expression(const Oid relid, + const char *expr_cstr, + Oid *expr_type_out) /* ret value #1 */ { - PartBoundInfo *pbin; + Node *expr; + Node *parse_tree; + List *query_tree_list; + + char *query_string; + + MemoryContext parse_mcxt, + old_mcxt; + + AssertTemporaryContext(); /* - * We might end up building the constraint - * tree that we wouldn't want to keep. + * We use separate memory context here, just to make sure we won't + * leave anything behind after parsing, rewriting and planning. */ - AssertTemporaryContext(); + parse_mcxt = AllocSetContextCreate(CurrentMemoryContext, + CppAsString(cook_partitioning_expression), + ALLOCSET_SMALL_SIZES); - /* Should we search in bounds cache? */ - pbin = pg_pathman_enable_bounds_cache ? 
- pathman_cache_search_relid(bound_cache, - partition, - HASH_FIND, - NULL) : - NULL; /* don't even bother */ + /* Switch to mcxt for cooking :) */ + old_mcxt = MemoryContextSwitchTo(parse_mcxt); - /* Build new entry */ - if (!pbin) + /* First we have to build a raw AST */ + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); + + /* We don't need pg_pathman's magic here */ + pathman_hooks_enabled = false; + + PG_TRY(); { - PartBoundInfo pbin_local; - Expr *con_expr; + Query *query; + int expr_attr; + Relids expr_varnos; + Bitmapset *expr_varattnos = NULL; - /* Initialize other fields */ - pbin_local.child_rel = partition; - pbin_local.byval = prel->ev_byval; + /* This will fail with ERROR in case of wrong expression */ + query_tree_list = pg_analyze_and_rewrite_compat(parse_tree, query_string, + NULL, 0, NULL); - /* Try to build constraint's expression tree (may emit ERROR) */ - con_expr = get_partition_constraint_expr(partition); + /* Sanity check #1 */ + if (list_length(query_tree_list) != 1) + elog(ERROR, "partitioning expression produced more than 1 query"); - /* Grab bounds/hash and fill in 'pbin_local' (may emit ERROR) */ - fill_pbin_with_bounds(&pbin_local, prel, con_expr); + query = (Query *) linitial(query_tree_list); - /* We strive to delay the creation of cache's entry */ - pbin = pg_pathman_enable_bounds_cache ? 
- pathman_cache_search_relid(bound_cache, - partition, - HASH_ENTER, - NULL) : - palloc(sizeof(PartBoundInfo)); + /* Sanity check #2 */ + if (list_length(query->targetList) != 1) + elog(ERROR, "there should be exactly 1 partitioning expression"); - /* Copy data from 'pbin_local' */ - memcpy(pbin, &pbin_local, sizeof(PartBoundInfo)); - } + /* Sanity check #3 */ + if (query_tree_walker(query, query_contains_subqueries, NULL, 0)) + elog(ERROR, "subqueries are not allowed in partitioning expression"); - return pbin; -} + expr = (Node *) ((TargetEntry *) linitial(query->targetList))->expr; + expr = eval_const_expressions(NULL, expr); -/* - * Get constraint expression tree of a partition. - * - * build_check_constraint_name_internal() is used to build conname. - */ -static Expr * -get_partition_constraint_expr(Oid partition) -{ - Oid conid; /* constraint Oid */ - char *conname; /* constraint name */ - HeapTuple con_tuple; - Datum conbin_datum; - bool conbin_isnull; - Expr *expr; /* expression tree for constraint */ + /* Sanity check #4 */ + if (contain_mutable_functions(expr)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg("functions in partitioning expression" + " must be marked IMMUTABLE"))); - conname = build_check_constraint_name_relid_internal(partition); - conid = get_relation_constraint_oid(partition, conname, true); + /* Sanity check #5 */ + expr_varnos = pull_varnos_compat(NULL, expr); + if (bms_num_members(expr_varnos) != 1 || + relid != ((RangeTblEntry *) linitial(query->rtable))->relid) + { + elog(ERROR, "partitioning expression should reference table \"%s\"", + get_rel_name(relid)); + } - if (!OidIsValid(conid)) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("constraint \"%s\" of partition \"%s\" does not exist", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - } + /* Sanity check #6 */ + pull_varattnos(expr, bms_singleton_member(expr_varnos), 
&expr_varattnos); + expr_attr = -1; + while ((expr_attr = bms_next_member(expr_varattnos, expr_attr)) >= 0) + { + AttrNumber attnum = expr_attr + FirstLowInvalidHeapAttributeNumber; + HeapTuple htup; - con_tuple = SearchSysCache1(CONSTROID, ObjectIdGetDatum(conid)); - conbin_datum = SysCacheGetAttr(CONSTROID, con_tuple, - Anum_pg_constraint_conbin, - &conbin_isnull); - if (conbin_isnull) + /* Check that there's no system attributes in expression */ + if (attnum < InvalidAttrNumber) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("system attributes are not supported"))); + + htup = SearchSysCache2(ATTNUM, + ObjectIdGetDatum(relid), + Int16GetDatum(attnum)); + if (HeapTupleIsValid(htup)) + { + bool nullable; + + /* Fetch 'nullable' and free syscache tuple */ + nullable = !((Form_pg_attribute) GETSTRUCT(htup))->attnotnull; + ReleaseSysCache(htup); + + if (nullable) + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), + errmsg("column \"%s\" should be marked NOT NULL", + get_attname_compat(relid, attnum)))); + } + } + + /* Free sets */ + bms_free(expr_varnos); + bms_free(expr_varattnos); + + Assert(expr); + + /* Set 'expr_type_out' if needed */ + if (expr_type_out) + *expr_type_out = exprType(expr); + } + PG_CATCH(); { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(WARNING, - (errmsg("constraint \"%s\" of partition \"%s\" has NULL conbin", - conname, get_rel_name_or_relid(partition)), - errhint(INIT_ERROR_HINT))); - pfree(conname); + ErrorData *error; + + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled = true; + + /* Switch to the original context & copy edata */ + MemoryContextSwitchTo(old_mcxt); + error = CopyErrorData(); + FlushErrorState(); + + /* Adjust error message */ + error->detail = error->message; + error->message = psprintf(COOK_PART_EXPR_ERROR, expr_cstr); + error->sqlerrcode = ERRCODE_INVALID_PARAMETER_VALUE; + error->cursorpos = 0; + error->internalpos = 0; - return NULL; /* 
could not parse */ + ReThrowError(error); } - pfree(conname); + PG_END_TRY(); - /* Finally we get a constraint expression tree */ - expr = (Expr *) stringToNode(TextDatumGetCString(conbin_datum)); + /* Don't forget to enable pg_pathman's hooks */ + pathman_hooks_enabled = true; - /* Don't foreget to release syscache tuple */ - ReleaseSysCache(con_tuple); + /* Switch to previous mcxt */ + MemoryContextSwitchTo(old_mcxt); + + /* Get Datum of serialized expression (right mcxt) */ + expr = copyObject(expr); + + /* Free memory */ + MemoryContextDelete(parse_mcxt); return expr; } -/* Fill PartBoundInfo with bounds/hash */ -static void -fill_pbin_with_bounds(PartBoundInfo *pbin, - const PartRelationInfo *prel, - const Expr *constraint_expr) +/* Canonicalize user's expression (trim whitespaces etc) */ +char * +canonicalize_partitioning_expression(const Oid relid, + const char *expr_cstr) { - AssertTemporaryContext(); - - /* Copy partitioning type to 'pbin' */ - pbin->parttype = prel->parttype; + Node *parse_tree; + Expr *expr; + char *query_string; + Query *query; - /* Perform a partitioning_type-dependent task */ - switch (prel->parttype) - { - case PT_HASH: - { - if (!validate_hash_constraint(constraint_expr, - prel, &pbin->part_idx)) - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("wrong constraint format for HASH partition \"%s\"", - get_rel_name_or_relid(pbin->child_rel)), - errhint(INIT_ERROR_HINT))); - } - } - break; - - case PT_RANGE: - { - Datum lower, upper; - bool lower_null, upper_null; + AssertTemporaryContext(); - if (validate_range_constraint(constraint_expr, - prel, &lower, &upper, - &lower_null, &upper_null)) - { - MemoryContext old_mcxt; + /* First we have to build a raw AST */ + (void) parse_partitioning_expression(relid, expr_cstr, + &query_string, &parse_tree); - /* Switch to the persistent memory context */ - old_mcxt = MemoryContextSwitchTo(PathmanBoundCacheContext); + query = 
parse_analyze_compat(parse_tree, query_string, NULL, 0, NULL); + expr = ((TargetEntry *) linitial(query->targetList))->expr; - pbin->range_min = lower_null ? - MakeBoundInf(MINUS_INFINITY) : - MakeBound(datumCopy(lower, - prel->ev_byval, - prel->ev_len)); + /* We don't care about memory efficiency here */ + return deparse_expression((Node *) expr, + deparse_context_for(get_rel_name(relid), relid), + false, false); +} - pbin->range_max = upper_null ? - MakeBoundInf(PLUS_INFINITY) : - MakeBound(datumCopy(upper, - prel->ev_byval, - prel->ev_len)); +/* Check if query has subqueries */ +static bool +query_contains_subqueries(Node *node, void *context) +{ + if (node == NULL) + return false; - /* Switch back */ - MemoryContextSwitchTo(old_mcxt); - } - else - { - DisablePathman(); /* disable pg_pathman since config is broken */ - ereport(ERROR, - (errmsg("wrong constraint format for RANGE partition \"%s\"", - get_rel_name_or_relid(pbin->child_rel)), - errhint(INIT_ERROR_HINT))); - } - } - break; + /* We've met a subquery */ + if (IsA(node, Query)) + return true; - default: - { - DisablePathman(); /* disable pg_pathman since config is broken */ - WrongPartType(prel->parttype); - } - break; - } + return expression_tree_walker(node, query_contains_subqueries, NULL); } /* - * Common PartRelationInfo checks. Emit ERROR if anything is wrong. + * Functions for delayed invalidation. 
*/ + +/* Add new delayed pathman shutdown job (DROP EXTENSION) */ void -shout_if_prel_is_invalid(const Oid parent_oid, - const PartRelationInfo *prel, - const PartType expected_part_type) +delay_pathman_shutdown(void) { - if (!prel) - elog(ERROR, "relation \"%s\" has no partitions", - get_rel_name_or_relid(parent_oid)); - - if (!PrelIsValid(prel)) - elog(ERROR, "pg_pathman's cache contains invalid entry " - "for relation \"%s\" [%u]", - get_rel_name_or_relid(parent_oid), - MyProcPid); + delayed_shutdown = true; +} - /* Check partitioning type unless it's "ANY" */ - if (expected_part_type != PT_ANY && - expected_part_type != prel->parttype) +/* Finish all pending invalidation jobs if possible */ +void +finish_delayed_invalidation(void) +{ + /* Check that current state is transactional */ + if (IsTransactionState()) { - char *expected_str; + AcceptInvalidationMessages(); - switch (expected_part_type) + /* Handle the probable 'DROP EXTENSION' case */ + if (delayed_shutdown) { - case PT_HASH: - expected_str = "HASH"; - break; + Oid cur_pathman_config_relid; - case PT_RANGE: - expected_str = "RANGE"; - break; + /* Unset 'shutdown' flag */ + delayed_shutdown = false; - default: - WrongPartType(expected_part_type); - expected_str = NULL; /* keep compiler happy */ - } + /* Get current PATHMAN_CONFIG relid */ + cur_pathman_config_relid = get_relname_relid(PATHMAN_CONFIG, + get_pathman_schema()); - elog(ERROR, "relation \"%s\" is not partitioned by %s", - get_rel_name_or_relid(parent_oid), - expected_str); + /* Check that PATHMAN_CONFIG table has indeed been dropped */ + if (cur_pathman_config_relid == InvalidOid || + cur_pathman_config_relid != get_pathman_config_relid(true)) + { + /* Ok, let's unload pg_pathman's config */ + unload_config(); + + /* No need to continue, exit */ + return; + } + } } } diff --git a/src/runtimeappend.c b/src/runtime_append.c similarity index 58% rename from src/runtimeappend.c rename to src/runtime_append.c index 9e93aedf..a90c101a 100644 --- 
a/src/runtimeappend.c +++ b/src/runtime_append.c @@ -8,7 +8,9 @@ * ------------------------------------------------------------------------ */ -#include "runtimeappend.h" +#include "compat/pg_compat.h" + +#include "runtime_append.h" #include "utils/guc.h" @@ -21,25 +23,25 @@ CustomExecMethods runtimeappend_exec_methods; void -init_runtimeappend_static_data(void) +init_runtime_append_static_data(void) { - runtimeappend_path_methods.CustomName = "RuntimeAppend"; - runtimeappend_path_methods.PlanCustomPath = create_runtimeappend_plan; + runtimeappend_path_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_path_methods.PlanCustomPath = create_runtime_append_plan; - runtimeappend_plan_methods.CustomName = "RuntimeAppend"; - runtimeappend_plan_methods.CreateCustomScanState = runtimeappend_create_scan_state; + runtimeappend_plan_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_plan_methods.CreateCustomScanState = runtime_append_create_scan_state; - runtimeappend_exec_methods.CustomName = "RuntimeAppend"; - runtimeappend_exec_methods.BeginCustomScan = runtimeappend_begin; - runtimeappend_exec_methods.ExecCustomScan = runtimeappend_exec; - runtimeappend_exec_methods.EndCustomScan = runtimeappend_end; - runtimeappend_exec_methods.ReScanCustomScan = runtimeappend_rescan; + runtimeappend_exec_methods.CustomName = RUNTIME_APPEND_NODE_NAME; + runtimeappend_exec_methods.BeginCustomScan = runtime_append_begin; + runtimeappend_exec_methods.ExecCustomScan = runtime_append_exec; + runtimeappend_exec_methods.EndCustomScan = runtime_append_end; + runtimeappend_exec_methods.ReScanCustomScan = runtime_append_rescan; runtimeappend_exec_methods.MarkPosCustomScan = NULL; runtimeappend_exec_methods.RestrPosCustomScan = NULL; - runtimeappend_exec_methods.ExplainCustomScan = runtimeappend_explain; + runtimeappend_exec_methods.ExplainCustomScan = runtime_append_explain; DefineCustomBoolVariable("pg_pathman.enable_runtimeappend", - "Enables the planner's use of 
RuntimeAppend custom node.", + "Enables the planner's use of " RUNTIME_APPEND_NODE_NAME " custom node.", NULL, &pg_pathman_enable_runtimeappend, true, @@ -48,13 +50,15 @@ init_runtimeappend_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&runtimeappend_plan_methods); } Path * -create_runtimeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel) +create_runtime_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel) { return create_append_path_common(root, inner_append, param_info, @@ -64,9 +68,9 @@ create_runtimeappend_path(PlannerInfo *root, } Plan * -create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans) +create_runtime_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans) { return create_append_plan_common(root, rel, best_path, tlist, @@ -75,7 +79,7 @@ create_runtimeappend_plan(PlannerInfo *root, RelOptInfo *rel, } Node * -runtimeappend_create_scan_state(CustomScan *node) +runtime_append_create_scan_state(CustomScan *node) { return create_append_scan_state_common(node, &runtimeappend_exec_methods, @@ -83,7 +87,7 @@ runtimeappend_create_scan_state(CustomScan *node) } void -runtimeappend_begin(CustomScanState *node, EState *estate, int eflags) +runtime_append_begin(CustomScanState *node, EState *estate, int eflags) { begin_append_common(node, estate, eflags); } @@ -116,25 +120,25 @@ fetch_next_tuple(CustomScanState *node) } TupleTableSlot * -runtimeappend_exec(CustomScanState *node) +runtime_append_exec(CustomScanState *node) { return exec_append_common(node, fetch_next_tuple); } void -runtimeappend_end(CustomScanState *node) +runtime_append_end(CustomScanState *node) { end_append_common(node); } void -runtimeappend_rescan(CustomScanState *node) +runtime_append_rescan(CustomScanState *node) { 
rescan_append_common(node); } void -runtimeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es) +runtime_append_explain(CustomScanState *node, List *ancestors, ExplainState *es) { RuntimeAppendState *scan_state = (RuntimeAppendState *) node; diff --git a/src/runtime_merge_append.c b/src/runtime_merge_append.c index 453ebab1..5edd803c 100644 --- a/src/runtime_merge_append.c +++ b/src/runtime_merge_append.c @@ -3,7 +3,7 @@ * runtime_merge_append.c * RuntimeMergeAppend node's function definitions and global variables * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -19,10 +19,14 @@ #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "nodes/plannodes.h" +#if PG_VERSION_NUM >= 120000 +#include "optimizer/optimizer.h" +#else #include "optimizer/cost.h" +#include "optimizer/var.h" +#endif #include "optimizer/planmain.h" #include "optimizer/tlist.h" -#include "optimizer/var.h" #include "utils/builtins.h" #include "utils/guc.h" #include "utils/lsyscache.h" @@ -191,23 +195,23 @@ unpack_runtimemergeappend_private(RuntimeMergeAppendState *scan_state, void init_runtime_merge_append_static_data(void) { - runtime_merge_append_path_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_path_methods.PlanCustomPath = create_runtimemergeappend_plan; + runtime_merge_append_path_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_path_methods.PlanCustomPath = create_runtime_merge_append_plan; - runtime_merge_append_plan_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_plan_methods.CreateCustomScanState = runtimemergeappend_create_scan_state; + runtime_merge_append_plan_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_plan_methods.CreateCustomScanState = 
runtime_merge_append_create_scan_state; - runtime_merge_append_exec_methods.CustomName = "RuntimeMergeAppend"; - runtime_merge_append_exec_methods.BeginCustomScan = runtimemergeappend_begin; - runtime_merge_append_exec_methods.ExecCustomScan = runtimemergeappend_exec; - runtime_merge_append_exec_methods.EndCustomScan = runtimemergeappend_end; - runtime_merge_append_exec_methods.ReScanCustomScan = runtimemergeappend_rescan; + runtime_merge_append_exec_methods.CustomName = RUNTIME_MERGE_APPEND_NODE_NAME; + runtime_merge_append_exec_methods.BeginCustomScan = runtime_merge_append_begin; + runtime_merge_append_exec_methods.ExecCustomScan = runtime_merge_append_exec; + runtime_merge_append_exec_methods.EndCustomScan = runtime_merge_append_end; + runtime_merge_append_exec_methods.ReScanCustomScan = runtime_merge_append_rescan; runtime_merge_append_exec_methods.MarkPosCustomScan = NULL; runtime_merge_append_exec_methods.RestrPosCustomScan = NULL; - runtime_merge_append_exec_methods.ExplainCustomScan = runtimemergeappend_explain; + runtime_merge_append_exec_methods.ExplainCustomScan = runtime_merge_append_explain; DefineCustomBoolVariable("pg_pathman.enable_runtimemergeappend", - "Enables the planner's use of RuntimeMergeAppend custom node.", + "Enables the planner's use of " RUNTIME_MERGE_APPEND_NODE_NAME " custom node.", NULL, &pg_pathman_enable_runtime_merge_append, true, @@ -216,13 +220,15 @@ init_runtime_merge_append_static_data(void) NULL, NULL, NULL); + + RegisterCustomScanMethods(&runtime_merge_append_plan_methods); } Path * -create_runtimemergeappend_path(PlannerInfo *root, - AppendPath *inner_append, - ParamPathInfo *param_info, - double sel) +create_runtime_merge_append_path(PlannerInfo *root, + AppendPath *inner_append, + ParamPathInfo *param_info, + double sel) { RelOptInfo *rel = inner_append->path.parent; Path *path; @@ -245,9 +251,9 @@ create_runtimemergeappend_path(PlannerInfo *root, } Plan * -create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo 
*rel, - CustomPath *best_path, List *tlist, - List *clauses, List *custom_plans) +create_runtime_merge_append_plan(PlannerInfo *root, RelOptInfo *rel, + CustomPath *best_path, List *tlist, + List *clauses, List *custom_plans) { CustomScan *node; Plan *plan; @@ -337,7 +343,7 @@ create_runtimemergeappend_plan(PlannerInfo *root, RelOptInfo *rel, } Node * -runtimemergeappend_create_scan_state(CustomScan *node) +runtime_merge_append_create_scan_state(CustomScan *node) { Node *state; state = create_append_scan_state_common(node, @@ -350,7 +356,7 @@ runtimemergeappend_create_scan_state(CustomScan *node) } void -runtimemergeappend_begin(CustomScanState *node, EState *estate, int eflags) +runtime_merge_append_begin(CustomScanState *node, EState *estate, int eflags) { begin_append_common(node, estate, eflags); } @@ -368,7 +374,8 @@ fetch_next_tuple(CustomScanState *node) for (i = 0; i < scan_state->rstate.ncur_plans; i++) { ChildScanCommon child = scan_state->rstate.cur_plans[i]; - PlanState *ps = child->content.plan_state; + + ps = child->content.plan_state; Assert(child->content_type == CHILD_PLAN_STATE); @@ -412,13 +419,13 @@ fetch_next_tuple(CustomScanState *node) } TupleTableSlot * -runtimemergeappend_exec(CustomScanState *node) +runtime_merge_append_exec(CustomScanState *node) { return exec_append_common(node, fetch_next_tuple); } void -runtimemergeappend_end(CustomScanState *node) +runtime_merge_append_end(CustomScanState *node) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; @@ -429,7 +436,7 @@ runtimemergeappend_end(CustomScanState *node) } void -runtimemergeappend_rescan(CustomScanState *node) +runtime_merge_append_rescan(CustomScanState *node) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; int nplans; @@ -475,7 +482,7 @@ runtimemergeappend_rescan(CustomScanState *node) } void -runtimemergeappend_explain(CustomScanState *node, List *ancestors, ExplainState *es) +runtime_merge_append_explain(CustomScanState *node, 
List *ancestors, ExplainState *es) { RuntimeMergeAppendState *scan_state = (RuntimeMergeAppendState *) node; @@ -715,10 +722,11 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys, foreach(j, ec->ec_members) { - EquivalenceMember *em = (EquivalenceMember *) lfirst(j); List *exprvars; ListCell *k; + em = (EquivalenceMember *) lfirst(j); + /* * We shouldn't be trying to sort by an equivalence class that * contains a constant, so no need to consider such cases any @@ -892,9 +900,15 @@ show_sort_group_keys(PlanState *planstate, const char *qlabel, initStringInfo(&sortkeybuf); /* Set up deparsing context */ +#if PG_VERSION_NUM >= 130000 + context = set_deparse_context_plan(es->deparse_cxt, + plan, + ancestors); +#else context = set_deparse_context_planstate(es->deparse_cxt, (Node *) planstate, ancestors); +#endif useprefix = (list_length(es->rtable) > 1 || es->verbose); for (keyno = 0; keyno < nkeys; keyno++) diff --git a/src/utility_stmt_hooking.c b/src/utility_stmt_hooking.c index f05aae27..83bfa680 100644 --- a/src/utility_stmt_hooking.c +++ b/src/utility_stmt_hooking.c @@ -4,7 +4,7 @@ * Override COPY TO/FROM and ALTER TABLE ... 
RENAME statements * for partitioned tables * - * Copyright (c) 2016, Postgres Professional + * Copyright (c) 2016-2020, Postgres Professional * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -18,21 +18,35 @@ #include "partition_filter.h" #include "access/htup_details.h" +#if PG_VERSION_NUM >= 120000 +#include "access/heapam.h" +#include "access/table.h" +#endif #include "access/sysattr.h" #include "access/xact.h" #include "catalog/namespace.h" #include "commands/copy.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "commands/copyfrom_internal.h" +#endif +#include "commands/defrem.h" #include "commands/trigger.h" #include "commands/tablecmds.h" #include "foreign/fdwapi.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ +#include "parser/parse_relation.h" +#endif #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rls.h" -#include "libpq/libpq.h" - +/* we avoid includig libpq.h because it requires openssl.h */ +#include "libpq/pqcomm.h" +extern PGDLLIMPORT ProtocolVersion FrontendProtocol; +extern void pq_endmsgread(void); /* Determine whether we should enable COPY or not (PostgresPro has a fix) */ #if defined(WIN32) && \ @@ -55,15 +69,25 @@ ProtocolVersion FrontendProtocol = (ProtocolVersion) 0; #endif -static uint64 PathmanCopyFrom(CopyState cstate, +#define PATHMAN_COPY_READ_LOCK AccessShareLock +#define PATHMAN_COPY_WRITE_LOCK RowExclusiveLock + + +static uint64 PathmanCopyFrom( +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate, +#else + CopyState cstate, +#endif Relation parent_rel, List *range_table, bool old_protocol); -static void prepare_rri_for_copy(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg); +static void 
prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); + +static void finish_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage); /* @@ -94,12 +118,16 @@ is_pathman_related_copy(Node *parsetree) /* Get partition's Oid while locking it */ parent_relid = RangeVarGetRelid(copy_stmt->relation, (copy_stmt->is_from ? - RowExclusiveLock : - AccessShareLock), - false); + PATHMAN_COPY_WRITE_LOCK : + PATHMAN_COPY_READ_LOCK), + true); + + /* Skip relation if it does not exist (for Citus compatibility) */ + if (!OidIsValid(parent_relid)) + return false; /* Check that relation is partitioned */ - if (get_pathman_relation_info(parent_relid)) + if (has_pathman_relation_info(parent_relid)) { ListCell *lc; @@ -108,10 +136,16 @@ is_pathman_related_copy(Node *parsetree) { DefElem *defel = (DefElem *) lfirst(lc); - Assert(IsA(defel, DefElem)); - /* We do not support freeze */ - if (strcmp(defel->defname, "freeze") == 0) + /* + * It would be great to allow copy.c extract option value and + * check it ready. However, there is no possibility (hooks) to do + * that before messaging 'ok, begin streaming data' to the client, + * which is ugly and confusing: e.g. it would require us to + * actually send something in regression tests before we notice + * the error. 
+ */ + if (strcmp(defel->defname, "freeze") == 0 && defGetBoolean(defel)) elog(ERROR, "freeze is not supported for partitioned tables"); } @@ -134,18 +168,17 @@ is_pathman_related_copy(Node *parsetree) */ bool is_pathman_related_table_rename(Node *parsetree, - Oid *partition_relid_out) /* ret value */ + Oid *relation_oid_out, /* ret value #1 */ + bool *is_parent_out) /* ret value #2 */ { - RenameStmt *rename_stmt = (RenameStmt *) parsetree; - Oid partition_relid, - parent_relid; - const PartRelationInfo *prel; - PartParentSearch parent_search; + RenameStmt *rename_stmt = (RenameStmt *) parsetree; + Oid relation_oid, + parent_relid; Assert(IsPathmanReady()); /* Set default values */ - if (partition_relid_out) *partition_relid_out = InvalidOid; + if (relation_oid_out) *relation_oid_out = InvalidOid; if (!IsA(parsetree, RenameStmt)) return false; @@ -154,20 +187,37 @@ is_pathman_related_table_rename(Node *parsetree, if (rename_stmt->renameType != OBJECT_TABLE) return false; - /* Assume it's a partition, fetch its Oid */ - partition_relid = RangeVarGetRelid(rename_stmt->relation, - AccessShareLock, - false); + /* Fetch Oid of this relation */ + relation_oid = RangeVarGetRelid(rename_stmt->relation, + AccessShareLock, + rename_stmt->missing_ok); + + /* Check ALTER TABLE ... IF EXISTS of nonexistent table */ + if (rename_stmt->missing_ok && relation_oid == InvalidOid) + return false; + + /* Assume it's a parent */ + if (has_pathman_relation_info(relation_oid)) + { + if (relation_oid_out) + *relation_oid_out = relation_oid; + if (is_parent_out) + *is_parent_out = true; + return true; + } - /* Try fetching parent of this table */ - parent_relid = get_parent_of_partition(partition_relid, &parent_search); - if (parent_search != PPS_ENTRY_PART_PARENT) + /* Assume it's a partition, fetch its parent */ + parent_relid = get_parent_of_partition(relation_oid); + if (!OidIsValid(parent_relid)) return false; /* Is parent partitioned? 
*/ - if ((prel = get_pathman_relation_info(parent_relid)) != NULL) + if (has_pathman_relation_info(parent_relid)) { - if (partition_relid_out) *partition_relid_out = partition_relid; + if (relation_oid_out) + *relation_oid_out = relation_oid; + if (is_parent_out) + *is_parent_out = false; return true; } @@ -183,10 +233,11 @@ is_pathman_related_alter_column_type(Node *parsetree, AttrNumber *attr_number_out, PartType *part_type_out) { - AlterTableStmt *alter_table_stmt = (AlterTableStmt *) parsetree; - ListCell *lc; - Oid parent_relid; - const PartRelationInfo *prel; + AlterTableStmt *alter_table_stmt = (AlterTableStmt *) parsetree; + ListCell *lc; + Oid parent_relid; + bool result = false; + PartRelationInfo *prel; Assert(IsPathmanReady()); @@ -194,13 +245,21 @@ is_pathman_related_alter_column_type(Node *parsetree, return false; /* Are we going to modify some table? */ +#if PG_VERSION_NUM >= 140000 + if (alter_table_stmt->objtype != OBJECT_TABLE) +#else if (alter_table_stmt->relkind != OBJECT_TABLE) +#endif return false; /* Assume it's a parent, fetch its Oid */ parent_relid = RangeVarGetRelid(alter_table_stmt->relation, AccessShareLock, - false); + alter_table_stmt->missing_ok); + + /* Check ALTER TABLE ... IF EXISTS of nonexistent table */ + if (alter_table_stmt->missing_ok && parent_relid == InvalidOid) + return false; /* Is parent partitioned? */ if ((prel = get_pathman_relation_info(parent_relid)) != NULL) @@ -235,16 +294,16 @@ is_pathman_related_alter_column_type(Node *parsetree, if (attr_number_out) *attr_number_out = attnum; /* Success! 
*/ - return true; + result = true; } - /* Default failure */ - return false; -} + close_pathman_relation_info(prel); + return result; +} /* - * CopyGetAttnums - build an integer list of attnums to be copied + * PathmanCopyGetAttnums - build an integer list of attnums to be copied * * The input attnamelist is either the user-specified column list, * or NIL if there was none (in which case we want all the non-dropped @@ -253,20 +312,19 @@ is_pathman_related_alter_column_type(Node *parsetree, * rel can be NULL ... it's only used for error reports. */ static List * -CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) +PathmanCopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) { List *attnums = NIL; if (attnamelist == NIL) { /* Generate default column list */ - Form_pg_attribute *attr = tupDesc->attrs; int attr_count = tupDesc->natts; int i; for (i = 0; i < attr_count; i++) { - if (attr[i]->attisdropped) + if (TupleDescAttr(tupDesc, i)->attisdropped) continue; attnums = lappend_int(attnums, i + 1); } @@ -286,11 +344,13 @@ CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) attnum = InvalidAttrNumber; for (i = 0; i < tupDesc->natts; i++) { - if (tupDesc->attrs[i]->attisdropped) + Form_pg_attribute att = TupleDescAttr(tupDesc, i); + + if (att->attisdropped) continue; - if (namestrcmp(&(tupDesc->attrs[i]->attname), name) == 0) + if (namestrcmp(&(att->attname), name) == 0) { - attnum = tupDesc->attrs[i]->attnum; + attnum = att->attnum; break; } } @@ -331,13 +391,17 @@ PathmanDoCopy(const CopyStmt *stmt, int stmt_len, uint64 *processed) { +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate; +#else CopyState cstate; - bool is_from = stmt->is_from; - bool pipe = (stmt->filename == NULL); +#endif + ParseState *pstate; Relation rel; - Node *query = NULL; List *range_table = NIL; - ParseState *pstate; + bool is_from = stmt->is_from, + pipe = (stmt->filename == NULL), + is_old_protocol = 
PG_PROTOCOL_MAJOR(FrontendProtocol) < 3 && pipe; /* Disallow COPY TO/FROM file or program except to superusers. */ if (!pipe && !superuser()) @@ -356,6 +420,9 @@ PathmanDoCopy(const CopyStmt *stmt, "psql's \\copy command also works for anyone."))); } + pstate = make_parsestate(NULL); + pstate->p_sourcetext = queryString; + /* Check that we have a relation */ if (stmt->relation) { @@ -364,21 +431,43 @@ PathmanDoCopy(const CopyStmt *stmt, List *attnums; ListCell *cur; RangeTblEntry *rte; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + RTEPermissionInfo *perminfo; +#endif Assert(!stmt->query); /* Open the relation (we've locked it in is_pathman_related_copy()) */ - rel = heap_openrv(stmt->relation, NoLock); + rel = heap_openrv_compat(stmt->relation, NoLock); rte = makeNode(RangeTblEntry); rte->rtekind = RTE_RELATION; rte->relid = RelationGetRelid(rel); rte->relkind = rel->rd_rel->relkind; +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + pstate->p_rtable = lappend(pstate->p_rtable, rte); + perminfo = addRTEPermissionInfo(&pstate->p_rteperminfos, rte); + perminfo->requiredPerms = required_access; +#else rte->requiredPerms = required_access; +#endif range_table = list_make1(rte); tupDesc = RelationGetDescr(rel); - attnums = CopyGetAttnums(tupDesc, rel, stmt->attlist); + attnums = PathmanCopyGetAttnums(tupDesc, rel, stmt->attlist); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + foreach(cur, attnums) + { + int attno; + Bitmapset **bms; + + attno = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; + bms = is_from ? 
&perminfo->insertedCols : &perminfo->selectedCols; + + *bms = bms_add_member(*bms, attno); + } + ExecCheckPermissions(pstate->p_rtable, list_make1(perminfo), true); +#else foreach(cur, attnums) { int attnum = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; @@ -389,119 +478,36 @@ PathmanDoCopy(const CopyStmt *stmt, rte->selectedCols = bms_add_member(rte->selectedCols, attnum); } ExecCheckRTPerms(range_table, true); +#endif - /* - * We should perform a query instead of low-level heap scan whenever: - * a) table has a RLS policy; - * b) table is partitioned & it's COPY FROM. - */ - if (check_enable_rls(rte->relid, InvalidOid, false) == RLS_ENABLED || - is_from == false) /* rewrite COPY table TO statements */ + /* Disable COPY FROM if table has RLS */ + if (is_from && check_enable_rls(rte->relid, InvalidOid, false) == RLS_ENABLED) { - SelectStmt *select; - RangeVar *from; - List *target_list = NIL; - - if (is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("COPY FROM not supported with row-level security"), errhint("Use INSERT statements instead."))); + } - /* Build target list */ - if (!stmt->attlist) - { - ColumnRef *cr; - ResTarget *target; - - cr = makeNode(ColumnRef); - cr->fields = list_make1(makeNode(A_Star)); - cr->location = -1; - - /* Build the ResTarget and add the ColumnRef to it. */ - target = makeNode(ResTarget); - target->name = NULL; - target->indirection = NIL; - target->val = (Node *) cr; - target->location = -1; - - target_list = list_make1(target); - } - else - { - ListCell *lc; - - foreach(lc, stmt->attlist) - { - ColumnRef *cr; - ResTarget *target; - - /* - * Build the ColumnRef for each column. The ColumnRef - * 'fields' property is a String 'Value' node (see - * nodes/value.h) that corresponds to the column name - * respectively. - */ - cr = makeNode(ColumnRef); - cr->fields = list_make1(lfirst(lc)); - cr->location = -1; - - /* Build the ResTarget and add the ColumnRef to it. 
*/ - target = makeNode(ResTarget); - target->name = NULL; - target->indirection = NIL; - target->val = (Node *) cr; - target->location = -1; - - /* Add each column to the SELECT statements target list */ - target_list = lappend(target_list, target); - } - } - - /* - * Build RangeVar for from clause, fully qualified based on the - * relation which we have opened and locked. - */ - from = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)), - RelationGetRelationName(rel), -1); - - /* Build query */ - select = makeNode(SelectStmt); - select->targetList = target_list; - select->fromClause = list_make1(from); - - query = (Node *) select; - - /* - * Close the relation for now, but keep the lock on it to prevent - * changes between now and when we start the query-based COPY. - * - * We'll reopen it later as part of the query-based COPY. - */ - heap_close(rel, NoLock); - rel = NULL; + /* Disable COPY TO */ + if (!is_from) + { + ereport(WARNING, + (errmsg("COPY TO will only select rows from parent table \"%s\"", + RelationGetRelationName(rel)), + errhint("Consider using the COPY (SELECT ...) TO variant."))); } } /* This should never happen (see is_pathman_related_copy()) */ else elog(ERROR, "error in function " CppAsString(PathmanDoCopy)); - pstate = make_parsestate(NULL); - pstate->p_sourcetext = queryString; - - /* COPY ... FROM ... 
*/ if (is_from) { - bool is_old_protocol = PG_PROTOCOL_MAJOR(FrontendProtocol) < 3 && - stmt->filename == NULL; - - /* There should be relation */ - if (!rel) elog(FATAL, "No relation for PATHMAN COPY FROM"); - /* check read-only transaction and parallel mode */ if (XactReadOnly && !rel->rd_islocaltemp) - PreventCommandIfReadOnly("PATHMAN COPY FROM"); - PreventCommandIfParallelMode("PATHMAN COPY FROM"); + PreventCommandIfReadOnly("COPY FROM"); + PreventCommandIfParallelMode("COPY FROM"); cstate = BeginCopyFromCompat(pstate, rel, stmt->filename, stmt->is_program, NULL, stmt->attlist, @@ -509,38 +515,35 @@ PathmanDoCopy(const CopyStmt *stmt, *processed = PathmanCopyFrom(cstate, rel, range_table, is_old_protocol); EndCopyFrom(cstate); } - /* COPY ... TO ... */ else { - CopyStmt modified_copy_stmt; - - /* We should've created a query */ - Assert(query); - - /* Copy 'stmt' and override some of the fields */ - modified_copy_stmt = *stmt; - modified_copy_stmt.relation = NULL; - modified_copy_stmt.query = query; - +#if PG_VERSION_NUM >= 160000 /* for commit f75cec4fff87 */ + /* + * Forget current RangeTblEntries and RTEPermissionInfos. + * Standard DoCopy will create new ones. + */ + pstate->p_rtable = NULL; + pstate->p_rteperminfos = NULL; +#endif /* Call standard DoCopy using a new CopyStmt */ - DoCopyCompat(pstate, &modified_copy_stmt, stmt_location, stmt_len, - processed); + DoCopyCompat(pstate, stmt, stmt_location, stmt_len, processed); } - /* - * Close the relation. If reading, we can release the AccessShareLock we - * got; if writing, we should hold the lock until end of transaction to - * ensure that updates will be committed before lock is released. - */ - if (rel != NULL) - heap_close(rel, (is_from ? NoLock : AccessShareLock)); + /* Close the relation, but keep it locked */ + heap_close_compat(rel, (is_from ? NoLock : PATHMAN_COPY_READ_LOCK)); } /* * Copy FROM file to relation. 
*/ static uint64 -PathmanCopyFrom(CopyState cstate, Relation parent_rel, +PathmanCopyFrom( +#if PG_VERSION_NUM >= 140000 /* Structure changed in c532d15dddff */ + CopyFromState cstate, +#else + CopyState cstate, +#endif + Relation parent_rel, List *range_table, bool old_protocol) { HeapTuple tuple; @@ -549,44 +552,79 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, bool *nulls; ResultPartsStorage parts_storage; - ResultRelInfo *parent_result_rel; + ResultRelInfo *parent_rri; + Oid parent_relid = RelationGetRelid(parent_rel); + MemoryContext query_mcxt = CurrentMemoryContext; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ - ExprContext *econtext; TupleTableSlot *myslot; - MemoryContext oldcontext = CurrentMemoryContext; - - Node *expr = NULL; - ExprState *expr_state = NULL; uint64 processed = 0; - tupDesc = RelationGetDescr(parent_rel); - parent_result_rel = makeNode(ResultRelInfo); - InitResultRelInfoCompat(parent_result_rel, + parent_rri = makeNode(ResultRelInfo); + InitResultRelInfoCompat(parent_rri, parent_rel, 1, /* dummy rangetable index */ 0); - ExecOpenIndices(parent_result_rel, false); + ExecOpenIndices(parent_rri, false); - estate->es_result_relations = parent_result_rel; +#if PG_VERSION_NUM >= 140000 /* reworked in 1375422c7826 */ + /* + * Call ExecInitRangeTable() should be first because in 14+ it initializes + * field "estate->es_result_relations": + */ +#if PG_VERSION_NUM >= 160000 + ExecInitRangeTable(estate, range_table, cstate->rteperminfos); +#else + ExecInitRangeTable(estate, range_table); +#endif + estate->es_result_relations = + (ResultRelInfo **) palloc0(list_length(range_table) * sizeof(ResultRelInfo *)); + estate->es_result_relations[0] = parent_rri; + /* + * Saving in the list allows to avoid needlessly traversing the whole + * array when only a few of its entries are possibly non-NULL. 
+ */ + estate->es_opened_result_relations = + lappend(estate->es_opened_result_relations, parent_rri); + estate->es_result_relation_info = parent_rri; +#else + estate->es_result_relations = parent_rri; estate->es_num_result_relations = 1; - estate->es_result_relation_info = parent_result_rel; + estate->es_result_relation_info = parent_rri; +#if PG_VERSION_NUM >= 120000 + ExecInitRangeTable(estate, range_table); +#else estate->es_range_table = range_table; - +#endif +#endif /* Initialize ResultPartsStorage */ - init_result_parts_storage(&parts_storage, estate, false, - ResultPartsStorageStandard, - prepare_rri_for_copy, NULL); - parts_storage.saved_rel_info = parent_result_rel; + init_result_parts_storage(&parts_storage, + parent_relid, parent_rri, + estate, CMD_INSERT, + RPS_CLOSE_RELATIONS, + RPS_DEFAULT_SPECULATIVE, + RPS_RRI_CB(prepare_rri_for_copy, cstate), + RPS_RRI_CB(finish_rri_for_copy, NULL)); +#if PG_VERSION_NUM >= 160000 /* for commit a61b1f74823c */ + /* ResultRelInfo of partitioned table. */ + parts_storage.init_rri = parent_rri; + + /* + * Copy the RTEPermissionInfos into estate as well, so that + * scan_result_parts_storage() et al will work correctly. + */ + estate->es_rteperminfos = cstate->rteperminfos; +#endif /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlot(estate); - ExecSetSlotDescriptor(myslot, tupDesc); + myslot = ExecInitExtraTupleSlotCompat(estate, NULL, &TTSOpsHeapTuple); /* Triggers might need a slot as well */ - estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); +#if PG_VERSION_NUM < 120000 + estate->es_trig_tuple_slot = ExecInitExtraTupleSlotCompat(estate, tupDesc, nothing_here); +#endif /* Prepare to catch AFTER triggers. */ AfterTriggerBeginQuery(); @@ -597,113 +635,107 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, * such. However, executing these triggers maintains consistency with the * EACH ROW triggers that we already fire on COPY. 
*/ - ExecBSInsertTriggers(estate, parent_result_rel); + ExecBSInsertTriggers(estate, parent_rri); values = (Datum *) palloc(tupDesc->natts * sizeof(Datum)); nulls = (bool *) palloc(tupDesc->natts * sizeof(bool)); - econtext = GetPerTupleExprContext(estate); - for (;;) { - TupleTableSlot *slot, - *tmp_slot; - bool skip_tuple, - isnull; + TupleTableSlot *slot; + bool skip_tuple = false; +#if PG_VERSION_NUM < 120000 Oid tuple_oid = InvalidOid; - Datum value; +#endif + ExprContext *econtext = GetPerTupleExprContext(estate); - const PartRelationInfo *prel; ResultRelInfoHolder *rri_holder; - ResultRelInfo *child_result_rel; + ResultRelInfo *child_rri; CHECK_FOR_INTERRUPTS(); ResetPerTupleExprContext(estate); - /* Fetch PartRelationInfo for parent relation */ - prel = get_pathman_relation_info(RelationGetRelid(parent_rel)); - - /* Initialize expression and expression state */ - if (expr == NULL) - { - expr = copyObject(prel->expr); - expr_state = ExecInitExpr((Expr *) expr, NULL); - } - /* Switch into per tuple memory context */ MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - if (!NextCopyFrom(cstate, econtext, values, nulls, &tuple_oid)) + if (!NextCopyFromCompat(cstate, econtext, values, nulls, &tuple_oid)) break; - /* We can form the input tuple. 
*/ + /* We can form the input tuple */ tuple = heap_form_tuple(tupDesc, values, nulls); +#if PG_VERSION_NUM < 120000 if (tuple_oid != InvalidOid) HeapTupleSetOid(tuple, tuple_oid); +#endif /* Place tuple in tuple slot --- but slot shouldn't free it */ slot = myslot; ExecSetSlotDescriptor(slot, tupDesc); +#if PG_VERSION_NUM >= 120000 + ExecStoreHeapTuple(tuple, slot, false); +#else ExecStoreTuple(tuple, slot, InvalidBuffer, false); +#endif - /* Execute expression */ - tmp_slot = econtext->ecxt_scantuple; - econtext->ecxt_scantuple = slot; - value = ExecEvalExprCompat(expr_state, econtext, &isnull, - mult_result_handler); - econtext->ecxt_scantuple = tmp_slot; - - if (isnull) - elog(ERROR, ERR_PART_ATTR_NULL); + /* Search for a matching partition */ + rri_holder = select_partition_for_insert(estate, &parts_storage, slot); + child_rri = rri_holder->result_rel_info; - /* - * Search for a matching partition. - * WARNING: 'prel' might change after this call! - */ - rri_holder = select_partition_for_insert(value, - prel->ev_type, prel, - &parts_storage, estate); - child_result_rel = rri_holder->result_rel_info; - estate->es_result_relation_info = child_result_rel; + /* Magic: replace parent's ResultRelInfo with ours */ + estate->es_result_relation_info = child_rri; /* * Constraints might reference the tableoid column, so initialize * t_tableOid before evaluating them. 
*/ - tuple->t_tableOid = RelationGetRelid(child_result_rel->ri_RelationDesc); + tuple->t_tableOid = RelationGetRelid(child_rri->ri_RelationDesc); /* If there's a transform map, rebuild the tuple */ if (rri_holder->tuple_map) { HeapTuple tuple_old; - /* TODO: use 'tuple_map' directly instead of do_convert_tuple() */ tuple_old = tuple; +#if PG_VERSION_NUM >= 120000 + tuple = execute_attr_map_tuple(tuple, rri_holder->tuple_map); +#else tuple = do_convert_tuple(tuple, rri_holder->tuple_map); +#endif heap_freetuple(tuple_old); } - /* now we can set proper tuple descriptor according to child relation */ - ExecSetSlotDescriptor(slot, RelationGetDescr(child_result_rel->ri_RelationDesc)); + /* Now we can set proper tuple descriptor according to child relation */ + ExecSetSlotDescriptor(slot, RelationGetDescr(child_rri->ri_RelationDesc)); +#if PG_VERSION_NUM >= 120000 + ExecStoreHeapTuple(tuple, slot, false); +#else ExecStoreTuple(tuple, slot, InvalidBuffer, false); +#endif /* Triggers and stuff need to be invoked in query context. 
*/ - MemoryContextSwitchTo(oldcontext); - - skip_tuple = false; + MemoryContextSwitchTo(query_mcxt); /* BEFORE ROW INSERT Triggers */ - if (child_result_rel->ri_TrigDesc && - child_result_rel->ri_TrigDesc->trig_insert_before_row) + if (child_rri->ri_TrigDesc && + child_rri->ri_TrigDesc->trig_insert_before_row) { - slot = ExecBRInsertTriggers(estate, child_result_rel, slot); +#if PG_VERSION_NUM >= 120000 + if (!ExecBRInsertTriggers(estate, child_rri, slot)) + skip_tuple = true; + else /* trigger might have changed tuple */ + tuple = ExecFetchSlotHeapTuple(slot, false, NULL); +#else + slot = ExecBRInsertTriggers(estate, child_rri, slot); if (slot == NULL) /* "do nothing" */ skip_tuple = true; else /* trigger might have changed tuple */ + { tuple = ExecMaterializeSlot(slot); + } +#endif } /* Proceed if we still have a tuple */ @@ -712,19 +744,43 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, List *recheckIndexes = NIL; /* Check the constraints of the tuple */ - if (child_result_rel->ri_RelationDesc->rd_att->constr) - ExecConstraints(child_result_rel, slot, estate); + if (child_rri->ri_RelationDesc->rd_att->constr) + ExecConstraints(child_rri, slot, estate); - /* OK, store the tuple and create index entries for it */ - simple_heap_insert(child_result_rel->ri_RelationDesc, tuple); + /* Handle local tables */ + if (!child_rri->ri_FdwRoutine) + { + /* OK, now store the tuple... */ + simple_heap_insert(child_rri->ri_RelationDesc, tuple); +#if PG_VERSION_NUM >= 120000 /* since 12, tid lives directly in slot */ + ItemPointerCopy(&tuple->t_self, &slot->tts_tid); + /* and we must stamp tableOid as we go around table_tuple_insert */ + slot->tts_tableOid = RelationGetRelid(child_rri->ri_RelationDesc); +#endif - if (child_result_rel->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), - estate, false, NULL, NIL); + /* ... 
and create index entries for it */ + if (child_rri->ri_NumIndices > 0) + recheckIndexes = ExecInsertIndexTuplesCompat(estate->es_result_relation_info, + slot, &(tuple->t_self), estate, false, false, NULL, NIL, false); + } +#ifdef PG_SHARDMAN + /* Handle foreign tables */ + else + { + child_rri->ri_FdwRoutine->ForeignNextCopyFrom(estate, + child_rri, + cstate); + } +#endif - /* AFTER ROW INSERT Triggers */ - ExecARInsertTriggers(estate, child_result_rel, tuple, - recheckIndexes); + /* AFTER ROW INSERT Triggers (FIXME: NULL transition) */ +#if PG_VERSION_NUM >= 120000 + ExecARInsertTriggersCompat(estate, child_rri, slot, + recheckIndexes, NULL); +#else + ExecARInsertTriggersCompat(estate, child_rri, tuple, + recheckIndexes, NULL); +#endif list_free(recheckIndexes); @@ -737,17 +793,15 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, } } - MemoryContextSwitchTo(oldcontext); + /* Switch back to query context */ + MemoryContextSwitchTo(query_mcxt); - /* - * In the old protocol, tell pqcomm that we can process normal protocol - * messages again. 
- */ + /* Required for old protocol */ if (old_protocol) pq_endmsgread(); - /* Execute AFTER STATEMENT insertion triggers */ - ExecASInsertTriggers(estate, parent_result_rel); + /* Execute AFTER STATEMENT insertion triggers (FIXME: NULL transition) */ + ExecASInsertTriggersCompat(estate, parent_rri, NULL); /* Handle queued AFTER triggers */ AfterTriggerEndQuery(estate); @@ -755,46 +809,84 @@ PathmanCopyFrom(CopyState cstate, Relation parent_rel, pfree(values); pfree(nulls); + /* Release resources for tuple table */ ExecResetTupleTable(estate->es_tupleTable, false); /* Close partitions and destroy hash table */ - fini_result_parts_storage(&parts_storage, true); + fini_result_parts_storage(&parts_storage); /* Close parent's indices */ - ExecCloseIndices(parent_result_rel); + ExecCloseIndices(parent_rri); + /* Release an EState along with all remaining working storage */ FreeExecutorState(estate); return processed; } /* - * COPY FROM does not support FDWs, emit ERROR. + * Init COPY FROM, if supported. */ static void -prepare_rri_for_copy(EState *estate, - ResultRelInfoHolder *rri_holder, - const ResultPartsStorage *rps_storage, - void *arg) +prepare_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) { - ResultRelInfo *rri = rri_holder->result_rel_info; - FdwRoutine *fdw_routine = rri->ri_FdwRoutine; + ResultRelInfo *rri = rri_holder->result_rel_info; + FdwRoutine *fdw_routine = rri->ri_FdwRoutine; if (fdw_routine != NULL) + { + /* + * If this PostgreSQL edition has no idea about shardman, behave as usual: + * vanilla Postgres doesn't support COPY FROM to foreign partitions. + * However, shardman patches to core extend FDW API to allow it. + */ +#ifdef PG_SHARDMAN + /* shardman COPY FROM requested? 
*/ + if (*find_rendezvous_variable( + "shardman_pathman_copy_from_rendezvous") != NULL && + FdwCopyFromIsSupported(fdw_routine)) + { + CopyState cstate = (CopyState) rps_storage->init_rri_holder_cb_arg; + ResultRelInfo *parent_rri = rps_storage->base_rri; + EState *estate = rps_storage->estate; + + fdw_routine->BeginForeignCopyFrom(estate, rri, cstate, parent_rri); + return; + } +#endif + elog(ERROR, "cannot copy to foreign partition \"%s\"", get_rel_name(RelationGetRelid(rri->ri_RelationDesc))); + } +} + +/* + * Shutdown FDWs. + */ +static void +finish_rri_for_copy(ResultRelInfoHolder *rri_holder, + const ResultPartsStorage *rps_storage) +{ +#ifdef PG_SHARDMAN + ResultRelInfo *resultRelInfo = rri_holder->result_rel_info; + + if (resultRelInfo->ri_FdwRoutine) + resultRelInfo->ri_FdwRoutine->EndForeignCopyFrom(rps_storage->estate, + resultRelInfo); +#endif } /* * Rename RANGE\HASH check constraint of a partition on table rename event. */ void -PathmanRenameConstraint(Oid partition_relid, /* cached partition Oid */ - const RenameStmt *part_rename_stmt) /* partition rename stmt */ +PathmanRenameConstraint(Oid partition_relid, /* partition Oid */ + const RenameStmt *rename_stmt) /* partition rename stmt */ { char *old_constraint_name, *new_constraint_name; - RenameStmt rename_stmt; + RenameStmt rename_con_stmt; /* Generate old constraint name */ old_constraint_name = @@ -802,16 +894,61 @@ PathmanRenameConstraint(Oid partition_relid, /* cached partition Oid */ /* Generate new constraint name */ new_constraint_name = - build_check_constraint_name_relname_internal(part_rename_stmt->newname); + build_check_constraint_name_relname_internal(rename_stmt->newname); /* Build check constraint RENAME statement */ - memset((void *) &rename_stmt, 0, sizeof(RenameStmt)); - NodeSetTag(&rename_stmt, T_RenameStmt); - rename_stmt.renameType = OBJECT_TABCONSTRAINT; - rename_stmt.relation = part_rename_stmt->relation; - rename_stmt.subname = old_constraint_name; - rename_stmt.newname = 
new_constraint_name; - rename_stmt.missing_ok = false; - - RenameConstraint(&rename_stmt); + memset((void *) &rename_con_stmt, 0, sizeof(RenameStmt)); + NodeSetTag(&rename_con_stmt, T_RenameStmt); + rename_con_stmt.renameType = OBJECT_TABCONSTRAINT; + rename_con_stmt.relation = rename_stmt->relation; + rename_con_stmt.subname = old_constraint_name; + rename_con_stmt.newname = new_constraint_name; + rename_con_stmt.missing_ok = false; + + /* Finally, rename partitioning constraint */ + RenameConstraint(&rename_con_stmt); + + pfree(old_constraint_name); + pfree(new_constraint_name); + + /* Make changes visible */ + CommandCounterIncrement(); +} + +/* + * Rename auto naming sequence of a parent on table rename event. + */ +void +PathmanRenameSequence(Oid parent_relid, /* parent Oid */ + const RenameStmt *rename_stmt) /* parent rename stmt */ +{ + char *old_seq_name, + *new_seq_name, + *seq_nsp_name; + RangeVar *seq_rv; + Oid seq_relid; + + /* Produce old & new names and RangeVar */ + seq_nsp_name = get_namespace_name(get_rel_namespace(parent_relid)); + old_seq_name = build_sequence_name_relid_internal(parent_relid); + new_seq_name = build_sequence_name_relname_internal(rename_stmt->newname); + seq_rv = makeRangeVar(seq_nsp_name, old_seq_name, -1); + + /* Fetch Oid of sequence */ + seq_relid = RangeVarGetRelid(seq_rv, AccessExclusiveLock, true); + + /* Do nothing if there's no naming sequence */ + if (!OidIsValid(seq_relid)) + return; + + /* Finally, rename auto naming sequence */ + RenameRelationInternalCompat(seq_relid, new_seq_name, false, false); + + pfree(seq_nsp_name); + pfree(old_seq_name); + pfree(new_seq_name); + pfree(seq_rv); + + /* Make changes visible */ + CommandCounterIncrement(); } diff --git a/src/utils.c b/src/utils.c index 6f9e53cd..9402d618 100644 --- a/src/utils.c +++ b/src/utils.c @@ -10,24 +10,23 @@ * ------------------------------------------------------------------------ */ +#include "pathman.h" #include "utils.h" #include 
"access/htup_details.h" #include "access/nbtree.h" #include "access/sysattr.h" -#include "access/xact.h" -#include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" -#include "catalog/pg_extension.h" #include "catalog/pg_operator.h" #include "catalog/pg_type.h" -#include "commands/extension.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parse_coerce.h" #include "parser/parse_oper.h" +#include "utils/array.h" #include "utils/builtins.h" +#include "utils/datetime.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/syscache.h" @@ -37,6 +36,21 @@ #include "utils/regproc.h" #endif +static const Node * +drop_irrelevant_expr_wrappers(const Node *expr) +{ + switch (nodeTag(expr)) + { + /* Strip relabeling */ + case T_RelabelType: + return (const Node *) ((const RelabelType *) expr)->arg; + + /* no special actions required */ + default: + return expr; + } +} + static bool clause_contains_params_walker(Node *node, void *context) { @@ -110,65 +124,16 @@ check_security_policy_internal(Oid relid, Oid role) /* Compare clause operand with expression */ bool -match_expr_to_operand(Node *expr, Node *operand) +match_expr_to_operand(const Node *expr, const Node *operand) { - /* Strip relabeling for both operand and expr */ - if (operand && IsA(operand, RelabelType)) - operand = (Node *) ((RelabelType *) operand)->arg; - - if (expr && IsA(expr, RelabelType)) - expr = (Node *) ((RelabelType *) expr)->arg; + expr = drop_irrelevant_expr_wrappers(expr); + operand = drop_irrelevant_expr_wrappers(operand); /* compare expressions and return result right away */ return equal(expr, operand); } -/* - * Return pg_pathman schema's Oid or InvalidOid if that's not possible. 
- */ -Oid -get_pathman_schema(void) -{ - Oid result; - Relation rel; - SysScanDesc scandesc; - HeapTuple tuple; - ScanKeyData entry[1]; - Oid ext_schema; - - /* It's impossible to fetch pg_pathman's schema now */ - if (!IsTransactionState()) - return InvalidOid; - - ext_schema = get_extension_oid("pg_pathman", true); - if (ext_schema == InvalidOid) - return InvalidOid; /* exit if pg_pathman does not exist */ - - ScanKeyInit(&entry[0], - ObjectIdAttributeNumber, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ext_schema)); - - rel = heap_open(ExtensionRelationId, AccessShareLock); - scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, - NULL, 1, entry); - - tuple = systable_getnext(scandesc); - - /* We assume that there can be at most one matching tuple */ - if (HeapTupleIsValid(tuple)) - result = ((Form_pg_extension) GETSTRUCT(tuple))->extnamespace; - else - result = InvalidOid; - - systable_endscan(scandesc); - - heap_close(rel, AccessShareLock); - - return result; -} - List * list_reverse(List *l) { @@ -220,6 +185,19 @@ get_rel_name_or_relid(Oid relid) return relname; } +/* + * Return palloced fully qualified relation name as a cstring + */ +char * +get_qualified_rel_name(Oid relid) +{ + Oid nspid = get_rel_namespace(relid); + + return psprintf("%s.%s", + quote_identifier(get_namespace_name(nspid)), + quote_identifier(get_rel_name(relid))); +} + RangeVar * makeRangeVarFromRelid(Oid relid) { @@ -366,7 +344,7 @@ perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) if (IsBinaryCoercible(in_type, out_type)) return value; - /* If not, try to perfrom a type cast */ + /* If not, try to perform a type cast */ ret = find_coercion_pathway(out_type, in_type, COERCION_EXPLICIT, &castfunc); @@ -411,7 +389,7 @@ perform_type_cast(Datum value, Oid in_type, Oid out_type, bool *success) } /* - * Convert interval from TEXT to binary form using partitioninig expresssion type. 
+ * Convert interval from TEXT to binary form using partitioninig expression type. */ Datum extract_binary_interval_from_text(Datum interval_text, /* interval as TEXT */ @@ -537,10 +515,10 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) /* Convert partition names into RangeVars */ if (relnames) { - rangevars = palloc(sizeof(RangeVar) * nrelnames); + rangevars = palloc(sizeof(RangeVar *) * nrelnames); for (i = 0; i < nrelnames; i++) { - List *nl = stringToQualifiedNameList(relnames[i]); + List *nl = stringToQualifiedNameListCompat(relnames[i]); rangevars[i] = makeRangeVarFromNameList(nl); } @@ -548,3 +526,15 @@ qualified_relnames_to_rangevars(char **relnames, size_t nrelnames) return rangevars; } + +/* + * Checks that Oid is valid (it need to do before relation locking: locking of + * invalid Oid causes an error on replica). + */ +void +check_relation_oid(Oid relid) +{ + if (relid < FirstNormalObjectId) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("identifier \"%u\" must be normal Oid", relid))); +} diff --git a/src/xact_handling.c b/src/xact_handling.c index 0d4ea5b0..31fb5d13 100644 --- a/src/xact_handling.c +++ b/src/xact_handling.c @@ -96,7 +96,7 @@ xact_is_level_read_committed(void) } /* - * Check if 'stmt' is BEGIN\ROLLBACK etc transaction statement. + * Check if 'stmt' is BEGIN/ROLLBACK/etc [TRANSACTION] statement. */ bool xact_is_transaction_stmt(Node *stmt) @@ -111,10 +111,10 @@ xact_is_transaction_stmt(Node *stmt) } /* - * Check if 'stmt' is SET [TRANSACTION] statement. + * Check if 'stmt' is SET ('name' | [TRANSACTION]) statement. 
*/ bool -xact_is_set_stmt(Node *stmt) +xact_is_set_stmt(Node *stmt, const char *name) { /* Check that SET TRANSACTION is implemented via VariableSetStmt */ Assert(VAR_SET_MULTI > 0); @@ -122,8 +122,18 @@ xact_is_set_stmt(Node *stmt) if (!stmt) return false; - if (IsA(stmt, VariableSetStmt)) + if (!IsA(stmt, VariableSetStmt)) + return false; + + if (!name) return true; + else + { + char *set_name = ((VariableSetStmt *) stmt)->name; + + if (set_name && pg_strcasecmp(name, set_name) == 0) + return true; + } return false; } @@ -137,22 +147,23 @@ xact_is_alter_pathman_stmt(Node *stmt) if (!stmt) return false; - if (IsA(stmt, AlterExtensionStmt) && - 0 == strcmp(((AlterExtensionStmt *) stmt)->extname, - "pg_pathman")) + if (!IsA(stmt, AlterExtensionStmt)) + return false; + + if (pg_strcasecmp(((AlterExtensionStmt *) stmt)->extname, "pg_pathman") == 0) return true; return false; } /* - * Check if object is visible in newer transactions. + * Check if object is visible to newer transactions. */ bool xact_object_is_visible(TransactionId obj_xmin) { - return TransactionIdPrecedes(obj_xmin, GetCurrentTransactionId()) || - TransactionIdEquals(obj_xmin, FrozenTransactionId); + return TransactionIdEquals(obj_xmin, FrozenTransactionId) || + TransactionIdPrecedes(obj_xmin, GetCurrentTransactionId()); } /* @@ -201,23 +212,3 @@ SetLocktagRelationOid(LOCKTAG *tag, Oid relid) SET_LOCKTAG_RELATION(*tag, dbid, relid); } - - -/* - * Lock relation exclusively & check for current isolation level. - */ -void -prevent_data_modification_internal(Oid relid) -{ - /* - * Check that isolation level is READ COMMITTED. - * Else we won't be able to see new rows - * which could slip through locks. 
- */ - if (!xact_is_level_read_committed()) - ereport(ERROR, - (errmsg("Cannot perform blocking partitioning operation"), - errdetail("Expected READ COMMITTED isolation level"))); - - LockRelationOid(relid, AccessExclusiveLock); -} diff --git a/tests/cmocka/Makefile b/tests/cmocka/Makefile index d46ad869..5216a467 100644 --- a/tests/cmocka/Makefile +++ b/tests/cmocka/Makefile @@ -3,17 +3,17 @@ TOP_SRC_DIR = ../../src CC = gcc CFLAGS += -I $(TOP_SRC_DIR) -I $(shell $(PG_CONFIG) --includedir-server) -CFLAGS += -I$(CURDIR)/../../src/include +CFLAGS += -I$(CURDIR)/../../src/include -I. CFLAGS += $(shell $(PG_CONFIG) --cflags_sl) CFLAGS += $(shell $(PG_CONFIG) --cflags) CFLAGS += $(CFLAGS_SL) CFLAGS += $(PG_CPPFLAGS) -LDFLAGS = -lcmocka +CFLAGS += -D_GNU_SOURCE +LDFLAGS += -lcmocka TEST_BIN = rangeset_tests OBJ = missing_basic.o missing_list.o missing_stringinfo.o \ - missing_bitmapset.o rangeset_tests.o \ - $(TOP_SRC_DIR)/rangeset.o + missing_bitmapset.o rangeset_tests.o $(TOP_SRC_DIR)/rangeset.o all: build_extension $(TEST_BIN) diff --git a/tests/cmocka/cmocka-1.1.1.tar.xz b/tests/cmocka/cmocka-1.1.1.tar.xz deleted file mode 100644 index 7b25e7ff..00000000 Binary files a/tests/cmocka/cmocka-1.1.1.tar.xz and /dev/null differ diff --git a/tests/cmocka/missing_basic.c b/tests/cmocka/missing_basic.c index d6c3808e..d20eb87f 100644 --- a/tests/cmocka/missing_basic.c +++ b/tests/cmocka/missing_basic.c @@ -1,6 +1,7 @@ #include #include "postgres.h" +#include "undef_printf.h" void * @@ -15,23 +16,37 @@ repalloc(void *pointer, Size size) return realloc(pointer, size); } +void +pfree(void *pointer) +{ + free(pointer); +} void ExceptionalCondition(const char *conditionName, +#if PG_VERSION_NUM < 160000 const char *errorType, +#endif const char *fileName, int lineNumber) { - if (!PointerIsValid(conditionName) || - !PointerIsValid(fileName) || - !PointerIsValid(errorType)) + if (!PointerIsValid(conditionName) || !PointerIsValid(fileName) +#if PG_VERSION_NUM < 160000 + || 
!PointerIsValid(errorType) +#endif + ) { printf("TRAP: ExceptionalCondition: bad arguments\n"); } else { printf("TRAP: %s(\"%s\", File: \"%s\", Line: %d)\n", - errorType, conditionName, +#if PG_VERSION_NUM < 160000 + errorType, +#else + "", +#endif + conditionName, fileName, lineNumber); } diff --git a/tests/cmocka/missing_bitmapset.c b/tests/cmocka/missing_bitmapset.c index 7e986d5a..84e7e771 100644 --- a/tests/cmocka/missing_bitmapset.c +++ b/tests/cmocka/missing_bitmapset.c @@ -1,4 +1,5 @@ #include "postgres.h" +#include "undef_printf.h" #include "nodes/bitmapset.h" diff --git a/tests/cmocka/missing_list.c b/tests/cmocka/missing_list.c index 9c07bc10..b85eed94 100644 --- a/tests/cmocka/missing_list.c +++ b/tests/cmocka/missing_list.c @@ -1,10 +1,10 @@ /*------------------------------------------------------------------------- * * list.c - * implementation for PostgreSQL generic linked list package + * implementation for PostgreSQL generic list package * * - * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -14,8 +14,10 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + #include "nodes/pg_list.h" +#if PG_VERSION_NUM < 130000 #define IsPointerList(l) ((l) == NIL || IsA((l), List)) #define IsIntegerList(l) ((l) == NIL || IsA((l), IntList)) @@ -140,3 +142,306 @@ lcons(void *datum, List *list) return list; } + +#else /* PG_VERSION_NUM >= 130000 */ + +/*------------------------------------------------------------------------- + * + * This was taken from src/backend/nodes/list.c PostgreSQL-13 source code. + * We only need lappend() and lcons() and their dependencies. + * There is one change: we use palloc() instead MemoryContextAlloc() in + * enlarge_list() (see #defines). 
+ * + *------------------------------------------------------------------------- + */ +#include "port/pg_bitutils.h" +#include "utils/memdebug.h" +#include "utils/memutils.h" + +#define MemoryContextAlloc(c, s) palloc(s) +#define GetMemoryChunkContext(l) 0 + +/* + * The previous List implementation, since it used a separate palloc chunk + * for each cons cell, had the property that adding or deleting list cells + * did not move the storage of other existing cells in the list. Quite a + * bit of existing code depended on that, by retaining ListCell pointers + * across such operations on a list. There is no such guarantee in this + * implementation, so instead we have debugging support that is meant to + * help flush out now-broken assumptions. Defining DEBUG_LIST_MEMORY_USAGE + * while building this file causes the List operations to forcibly move + * all cells in a list whenever a cell is added or deleted. In combination + * with MEMORY_CONTEXT_CHECKING and/or Valgrind, this can usually expose + * broken code. It's a bit expensive though, as there's many more palloc + * cycles and a lot more data-copying than in a default build. + * + * By default, we enable this when building for Valgrind. + */ +#ifdef USE_VALGRIND +#define DEBUG_LIST_MEMORY_USAGE +#endif + +/* Overhead for the fixed part of a List header, measured in ListCells */ +#define LIST_HEADER_OVERHEAD \ + ((int) ((offsetof(List, initial_elements) - 1) / sizeof(ListCell) + 1)) + +/* + * Macros to simplify writing assertions about the type of a list; a + * NIL list is considered to be an empty list of any type. + */ +#define IsPointerList(l) ((l) == NIL || IsA((l), List)) +#define IsIntegerList(l) ((l) == NIL || IsA((l), IntList)) +#define IsOidList(l) ((l) == NIL || IsA((l), OidList)) + +#ifdef USE_ASSERT_CHECKING +/* + * Check that the specified List is valid (so far as we can tell). 
+ */ +static void +check_list_invariants(const List *list) +{ + if (list == NIL) + return; + + Assert(list->length > 0); + Assert(list->length <= list->max_length); + Assert(list->elements != NULL); + + Assert(list->type == T_List || + list->type == T_IntList || + list->type == T_OidList); +} +#else +#define check_list_invariants(l) ((void) 0) +#endif /* USE_ASSERT_CHECKING */ + +/* + * Return a freshly allocated List with room for at least min_size cells. + * + * Since empty non-NIL lists are invalid, new_list() sets the initial length + * to min_size, effectively marking that number of cells as valid; the caller + * is responsible for filling in their data. + */ +static List * +new_list(NodeTag type, int min_size) +{ + List *newlist; + int max_size; + + Assert(min_size > 0); + + /* + * We allocate all the requested cells, and possibly some more, as part of + * the same palloc request as the List header. This is a big win for the + * typical case of short fixed-length lists. It can lose if we allocate a + * moderately long list and then it gets extended; we'll be wasting more + * initial_elements[] space than if we'd made the header small. However, + * rounding up the request as we do in the normal code path provides some + * defense against small extensions. + */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * Normally, we set up a list with some extra cells, to allow it to grow + * without a repalloc. Prefer cell counts chosen to make the total + * allocation a power-of-2, since palloc would round it up to that anyway. + * (That stops being true for very large allocations, but very long lists + * are infrequent, so it doesn't seem worth special logic for such cases.) + * + * The minimum allocation is 8 ListCell units, providing either 4 or 5 + * available ListCells depending on the machine's word width. Counting + * palloc's overhead, this uses the same amount of space as a one-cell + * list did in the old implementation, and less space for any longer list. 
+ * + * We needn't worry about integer overflow; no caller passes min_size + * that's more than twice the size of an existing list, so the size limits + * within palloc will ensure that we don't overflow here. + */ + max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD)); + max_size -= LIST_HEADER_OVERHEAD; +#else + + /* + * For debugging, don't allow any extra space. This forces any cell + * addition to go through enlarge_list() and thus move the existing data. + */ + max_size = min_size; +#endif + + newlist = (List *) palloc(offsetof(List, initial_elements) + + max_size * sizeof(ListCell)); + newlist->type = type; + newlist->length = min_size; + newlist->max_length = max_size; + newlist->elements = newlist->initial_elements; + + return newlist; +} + +/* + * Enlarge an existing non-NIL List to have room for at least min_size cells. + * + * This does *not* update list->length, as some callers would find that + * inconvenient. (list->length had better be the correct number of existing + * valid cells, though.) + */ +static void +enlarge_list(List *list, int min_size) +{ + int new_max_len; + + Assert(min_size > list->max_length); /* else we shouldn't be here */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * As above, we prefer power-of-two total allocations; but here we need + * not account for list header overhead. + */ + + /* clamp the minimum value to 16, a semi-arbitrary small power of 2 */ + new_max_len = pg_nextpower2_32(Max(16, min_size)); + +#else + /* As above, don't allocate anything extra */ + new_max_len = min_size; +#endif + + if (list->elements == list->initial_elements) + { + /* + * Replace original in-line allocation with a separate palloc block. + * Ensure it is in the same memory context as the List header. (The + * previous List implementation did not offer any guarantees about + * keeping all list cells in the same context, but it seems reasonable + * to create such a guarantee now.) 
+ */ + list->elements = (ListCell *) + MemoryContextAlloc(GetMemoryChunkContext(list), + new_max_len * sizeof(ListCell)); + memcpy(list->elements, list->initial_elements, + list->length * sizeof(ListCell)); + + /* + * We must not move the list header, so it's unsafe to try to reclaim + * the initial_elements[] space via repalloc. In debugging builds, + * however, we can clear that space and/or mark it inaccessible. + * (wipe_mem includes VALGRIND_MAKE_MEM_NOACCESS.) + */ +#ifdef CLOBBER_FREED_MEMORY + wipe_mem(list->initial_elements, + list->max_length * sizeof(ListCell)); +#else + VALGRIND_MAKE_MEM_NOACCESS(list->initial_elements, + list->max_length * sizeof(ListCell)); +#endif + } + else + { +#ifndef DEBUG_LIST_MEMORY_USAGE + /* Normally, let repalloc deal with enlargement */ + list->elements = (ListCell *) repalloc(list->elements, + new_max_len * sizeof(ListCell)); +#else + /* + * repalloc() might enlarge the space in-place, which we don't want + * for debugging purposes, so forcibly move the data somewhere else. + */ + ListCell *newelements; + + newelements = (ListCell *) + MemoryContextAlloc(GetMemoryChunkContext(list), + new_max_len * sizeof(ListCell)); + memcpy(newelements, list->elements, + list->length * sizeof(ListCell)); + pfree(list->elements); + list->elements = newelements; +#endif + } + + list->max_length = new_max_len; +} + +/* + * Make room for a new head cell in the given (non-NIL) list. + * + * The data in the new head cell is undefined; the caller should be + * sure to fill it in + */ +static void +new_head_cell(List *list) +{ + /* Enlarge array if necessary */ + if (list->length >= list->max_length) + enlarge_list(list, list->length + 1); + /* Now shove the existing data over */ + memmove(&list->elements[1], &list->elements[0], + list->length * sizeof(ListCell)); + list->length++; +} + +/* + * Make room for a new tail cell in the given (non-NIL) list. 
+ * + * The data in the new tail cell is undefined; the caller should be + * sure to fill it in + */ +static void +new_tail_cell(List *list) +{ + /* Enlarge array if necessary */ + if (list->length >= list->max_length) + enlarge_list(list, list->length + 1); + list->length++; +} + +/* + * Append a pointer to the list. A pointer to the modified list is + * returned. Note that this function may or may not destructively + * modify the list; callers should always use this function's return + * value, rather than continuing to use the pointer passed as the + * first argument. + */ +List * +lappend(List *list, void *datum) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List, 1); + else + new_tail_cell(list); + + lfirst(list_tail(list)) = datum; + check_list_invariants(list); + return list; +} + +/* + * Prepend a new element to the list. A pointer to the modified list + * is returned. Note that this function may or may not destructively + * modify the list; callers should always use this function's return + * value, rather than continuing to use the pointer passed as the + * second argument. + * + * Caution: before Postgres 8.0, the original List was unmodified and + * could be considered to retain its separate identity. This is no longer + * the case. 
+ */ +List * +lcons(void *datum, List *list) +{ + Assert(IsPointerList(list)); + + if (list == NIL) + list = new_list(T_List, 1); + else + new_head_cell(list); + + lfirst(list_head(list)) = datum; + check_list_invariants(list); + return list; +} + +#endif /* PG_VERSION_NUM */ diff --git a/tests/cmocka/missing_stringinfo.c b/tests/cmocka/missing_stringinfo.c index 8596bf7e..80710a4e 100644 --- a/tests/cmocka/missing_stringinfo.c +++ b/tests/cmocka/missing_stringinfo.c @@ -14,6 +14,7 @@ *------------------------------------------------------------------------- */ #include "postgres.h" +#include "undef_printf.h" #include "lib/stringinfo.h" #include "utils/memutils.h" @@ -205,7 +206,13 @@ appendStringInfoSpaces(StringInfo str, int count) * if necessary. */ void -appendBinaryStringInfo(StringInfo str, const char *data, int datalen) +appendBinaryStringInfo(StringInfo str, +#if PG_VERSION_NUM < 160000 + const char *data, +#else + const void *data, +#endif + int datalen) { Assert(str != NULL); diff --git a/tests/cmocka/rangeset_tests.c b/tests/cmocka/rangeset_tests.c index 98d8d4d5..1f700bc3 100644 --- a/tests/cmocka/rangeset_tests.c +++ b/tests/cmocka/rangeset_tests.c @@ -16,6 +16,7 @@ */ static void test_irange_basic(void **state); +static void test_irange_change_lossiness(void **state); static void test_irange_list_union_merge(void **state); static void test_irange_list_union_lossy_cov(void **state); @@ -33,6 +34,7 @@ main(void) const struct CMUnitTest tests[] = { cmocka_unit_test(test_irange_basic), + cmocka_unit_test(test_irange_change_lossiness), cmocka_unit_test(test_irange_list_union_merge), cmocka_unit_test(test_irange_list_union_lossy_cov), cmocka_unit_test(test_irange_list_union_complete_cov), @@ -75,10 +77,76 @@ test_irange_basic(void **state) assert_true(is_irange_valid(irange)); /* test allocation */ - irange_list = NIL; - irange_list = lappend_irange(irange_list, irange); + irange = make_irange(100, 200, IR_LOSSY); + irange_list = lappend_irange(NIL, 
irange); assert_memory_equal(&irange, &linitial_irange(irange_list), sizeof(IndexRange)); assert_memory_equal(&irange, &llast_irange(irange_list), sizeof(IndexRange)); + + /* test length */ + irange_list = NIL; + assert_int_equal(irange_list_length(irange_list), 0); + irange_list = lappend_irange(irange_list, make_irange(10, 20, IR_LOSSY)); + assert_int_equal(irange_list_length(irange_list), 11); + irange_list = lappend_irange(irange_list, make_irange(21, 30, IR_LOSSY)); + assert_int_equal(irange_list_length(irange_list), 21); +} + + +/* Test lossiness switcher */ +static void +test_irange_change_lossiness(void **state) +{ + List *irange_list; + + /* test lossiness change (NIL) */ + irange_list = irange_list_set_lossiness(NIL, IR_LOSSY); + assert_ptr_equal(irange_list, NIL); + irange_list = irange_list_set_lossiness(NIL, IR_COMPLETE); + assert_ptr_equal(irange_list, NIL); + + /* test lossiness change (no-op) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-20]L"); + + /* test lossiness change (no-op) #2 */ + irange_list = list_make1_irange(make_irange(30, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[30-40]C"); + + /* test lossiness change (single element) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-20]C"); + + /* test lossiness change (single element) #2 */ + irange_list = list_make1_irange(make_irange(30, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[30-40]L"); + + /* test lossiness change (multiple elements, adjacent) #1 */ + irange_list = list_make1_irange(make_irange(10, 20, 
IR_LOSSY)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-40]C"); + + /* test lossiness change (multiple elements, adjacent) #2 */ + irange_list = list_make1_irange(make_irange(10, 20, IR_COMPLETE)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-40]L"); + + /* test lossiness change (multiple elements, non-adjacent) #1 */ + irange_list = list_make1_irange(make_irange(10, 15, IR_COMPLETE)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_LOSSY)); + irange_list = irange_list_set_lossiness(irange_list, IR_COMPLETE); + assert_string_equal(rangeset_print(irange_list), "[10-15]C, [21-40]C"); + + /* test lossiness change (multiple elements, non-adjacent) #2 */ + irange_list = list_make1_irange(make_irange(10, 15, IR_LOSSY)); + irange_list = lappend_irange(irange_list, make_irange(21, 40, IR_COMPLETE)); + irange_list = irange_list_set_lossiness(irange_list, IR_LOSSY); + assert_string_equal(rangeset_print(irange_list), "[10-15]L, [21-40]L"); } diff --git a/tests/cmocka/undef_printf.h b/tests/cmocka/undef_printf.h new file mode 100644 index 00000000..63ba700c --- /dev/null +++ b/tests/cmocka/undef_printf.h @@ -0,0 +1,24 @@ +#ifdef vsnprintf +#undef vsnprintf +#endif +#ifdef snprintf +#undef snprintf +#endif +#ifdef vsprintf +#undef vsprintf +#endif +#ifdef sprintf +#undef sprintf +#endif +#ifdef vfprintf +#undef vfprintf +#endif +#ifdef fprintf +#undef fprintf +#endif +#ifdef vprintf +#undef vprintf +#endif +#ifdef printf +#undef printf +#endif diff --git a/tests/python/.flake8 b/tests/python/.flake8 new file mode 100644 index 00000000..7d6f9f71 --- /dev/null +++ b/tests/python/.flake8 @@ -0,0 +1,2 @@ +[flake8] +ignore = E241, E501 diff --git 
a/tests/python/.gitignore b/tests/python/.gitignore new file mode 100644 index 00000000..750ecf9f --- /dev/null +++ b/tests/python/.gitignore @@ -0,0 +1 @@ +tests.log diff --git a/tests/python/.style.yapf b/tests/python/.style.yapf new file mode 100644 index 00000000..88f004bb --- /dev/null +++ b/tests/python/.style.yapf @@ -0,0 +1,5 @@ +[style] +based_on_style = pep8 +spaces_before_comment = 4 +split_before_logical_operator = false +column_limit=100 diff --git a/tests/python/Makefile b/tests/python/Makefile index cb2bc50d..8311bb12 100644 --- a/tests/python/Makefile +++ b/tests/python/Makefile @@ -1,2 +1,6 @@ partitioning_tests: - python3 -m unittest partitioning_test.py +ifneq ($(CASE),) + python3 -u partitioning_test.py Tests.$(CASE) +else + python3 -u partitioning_test.py +endif diff --git a/tests/python/partitioning_test.py b/tests/python/partitioning_test.py old mode 100755 new mode 100644 index 0d05c458..ba4b205f --- a/tests/python/partitioning_test.py +++ b/tests/python/partitioning_test.py @@ -1,1087 +1,1138 @@ #!/usr/bin/env python3 # coding: utf-8 - """ - concurrent_partitioning_test.py - Tests concurrent partitioning worker with simultaneous update queries +partitioning_test.py + Various stuff that looks out of place in regression tests - Copyright (c) 2015-2017, Postgres Professional + Copyright (c) 2015-2020, Postgres Professional """ -import unittest +import functools +import json import math -import time +import multiprocessing import os +import random import re import subprocess +import sys import threading +import time +import unittest -from testgres import get_new_node, stop_all, get_config +from packaging.version import Version +from testgres import get_new_node, get_pg_version, configure_testgres + +# set setup base logging config, it can be turned on by `use_python_logging` +# parameter on node setup +# configure_testgres(use_python_logging=True) + +import logging +import logging.config + +logfile = 
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tests.log') +LOG_CONFIG = { + 'version': 1, + 'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + 'file': { + 'class': 'logging.FileHandler', + 'filename': logfile, + 'formatter': 'base_format', + 'level': logging.DEBUG, + }, + }, + 'formatters': { + 'base_format': { + 'format': '%(node)-5s: %(message)s', + }, + }, + 'root': { + 'handlers': ('file', ), + 'level': 'DEBUG', + }, +} + +logging.config.dictConfig(LOG_CONFIG) +version = Version(get_pg_version()) -version = get_config().get("VERSION_NUM") # Helper function for json equality -def ordered(obj): - if isinstance(obj, dict): - return sorted((k, ordered(v)) for k, v in obj.items()) - if isinstance(obj, list): - return sorted(ordered(x) for x in obj) - else: - return obj - - -def if_fdw_enabled(func): - """To run tests with FDW support set environment variable TEST_FDW=1""" - def wrapper(*args, **kwargs): - if os.environ.get('FDW_DISABLED') != '1': - func(*args, **kwargs) - else: - print('Warning: FDW features tests are disabled, skipping...') - return wrapper - - -class PartitioningTests(unittest.TestCase): - - def setUp(self): - self.setup_cmd = [ - 'create table abc(id serial, t text)', - 'insert into abc select generate_series(1, 300000)', - 'select create_hash_partitions(\'abc\', \'id\', 3, partition_data := false)', - ] - - def tearDown(self): - stop_all() - - def start_new_pathman_cluster(self, name='test', allows_streaming=False): - node = get_new_node(name) - node.init(allows_streaming=allows_streaming) - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - return node - - def init_test_data(self, node): - """Initialize pg_pathman extension and test data""" - for cmd in self.setup_cmd: - node.safe_psql('postgres', cmd) - - def catchup_replica(self, master, replica): - 
"""Wait until replica synchronizes with master""" - if version >= 100000: - wait_lsn_query = \ - 'SELECT pg_current_wal_lsn() <= replay_lsn ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - else: - wait_lsn_query = \ - 'SELECT pg_current_xlog_location() <= replay_location ' \ - 'FROM pg_stat_replication WHERE application_name = \'%s\'' \ - % replica.name - master.poll_query_until('postgres', wait_lsn_query) - - def printlog(self, logfile): - with open(logfile, 'r') as log: - for line in log.readlines(): - print(line) - - def test_concurrent(self): - """Tests concurrent partitioning""" - try: - node = self.start_new_pathman_cluster() - self.init_test_data(node) - - node.psql( - 'postgres', - 'select partition_table_concurrently(\'abc\')') - - while True: - # update some rows to check for deadlocks - node.safe_psql( - 'postgres', - ''' - update abc set t = 'test' - where id in (select (random() * 300000)::int - from generate_series(1, 3000)) - ''') - - count = node.execute( - 'postgres', - 'select count(*) from pathman_concurrent_part_tasks') - - # if there is no active workers then it means work is done - if count[0][0] == 0: - break - time.sleep(1) - - data = node.execute('postgres', 'select count(*) from only abc') - self.assertEqual(data[0][0], 0) - data = node.execute('postgres', 'select count(*) from abc') - self.assertEqual(data[0][0], 300000) - - node.stop() - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - - def test_replication(self): - """Tests how pg_pathman works with replication""" - node = get_new_node('master') - replica = get_new_node('repl') - - try: - # initialize master server - node = self.start_new_pathman_cluster(allows_streaming=True) - node.backup('my_backup') - - # initialize replica from backup - replica.init_from_backup(node, 'my_backup', has_streaming=True) - replica.start() - - # initialize pg_pathman extension and some test data - self.init_test_data(node) - - # 
wait until replica catches up - self.catchup_replica(node, replica) - - # check that results are equal - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - - # enable parent and see if it is enabled in replica - node.psql('postgres', 'select enable_parent(\'abc\'') - - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 300000 - ) - - # check that direct UPDATE in pathman_config_params invalidates - # cache - node.psql( - 'postgres', - 'update pathman_config_params set enable_parent = false') - self.catchup_replica(node, replica) - self.assertEqual( - node.psql('postgres', 'explain (costs off) select * from abc'), - replica.psql('postgres', 'explain (costs off) select * from abc') - ) - self.assertEqual( - node.psql('postgres', 'select * from abc'), - replica.psql('postgres', 'select * from abc') - ) - self.assertEqual( - node.execute('postgres', 'select count(*) from abc')[0][0], - 0 - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - self.printlog(replica.logs_dir + '/postgresql.log') - raise e - - def test_locks(self): - """Test that a session trying to create new partitions waits for other - sessions if they are doing the same""" - - import threading - import time - - class Flag: - def __init__(self, value): - self.flag = value - - def set(self, value): - self.flag = value - - def get(self): - return self.flag - - # There is one flag for each thread which shows if thread have done its work - flags = [Flag(False) for i in range(3)] - - # All threads synchronize though this lock - lock = 
threading.Lock() - - # Define thread function - def add_partition(node, flag, query): - """ We expect that this query will wait until another session - commits or rolls back""" - node.safe_psql('postgres', query) - with lock: - flag.set(True) - - # Initialize master server - node = get_new_node('master') - - try: - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.safe_psql( - 'postgres', - 'create extension pg_pathman; ' + - 'create table abc(id serial, t text); ' + - 'insert into abc select generate_series(1, 100000); ' + - 'select create_range_partitions(\'abc\', \'id\', 1, 50000);' - ) - - # Start transaction that will create partition - con = node.connect() - con.begin() - con.execute('select append_range_partition(\'abc\')') - - # Start threads that suppose to add new partitions and wait some - # time - query = [ - 'select prepend_range_partition(\'abc\')', - 'select append_range_partition(\'abc\')', - 'select add_range_partition(\'abc\', 500000, 550000)', - ] - threads = [] - for i in range(3): - thread = threading.Thread( - target=add_partition, - args=(node, flags[i], query[i])) - threads.append(thread) - thread.start() - time.sleep(3) - - # This threads should wait until current transaction finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), False) - - # Commit transaction. 
Since then other sessions can create - # partitions - con.commit() - - # Now wait until each thread finishes - for thread in threads: - thread.join() - - # Check flags, it should be true which means that threads are - # finished - with lock: - for i in range(3): - self.assertEqual(flags[i].get(), True) - - # Check that all partitions are created - self.assertEqual( - node.safe_psql( - 'postgres', - 'select count(*) from pg_inherits where inhparent=\'abc\'::regclass' - ), - b'6\n' - ) - except Exception as e: - self.printlog(node.logs_dir + '/postgresql.log') - raise e - - def test_tablespace(self): - """Check tablespace support""" - - def check_tablespace(node, tablename, tablespace): - res = node.execute( - 'postgres', - 'select get_tablespace(\'{}\')'.format(tablename)) - if len(res) == 0: - return False - - return res[0][0] == tablespace - - node = get_new_node('master') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman\'\n') - node.start() - node.psql('postgres', 'create extension pg_pathman') - - # create tablespace - path = os.path.join(node.data_dir, 'test_space_location') - os.mkdir(path) - node.psql( - 'postgres', - 'create tablespace test_space location \'{}\''.format(path)) - - # create table in this tablespace - node.psql( - 'postgres', - 'create table abc(a serial, b int) tablespace test_space') - - # create three partitions. 
Excpect that they will be created in the - # same tablespace as the parent table - node.psql( - 'postgres', - 'select create_range_partitions(\'abc\', \'a\', 1, 10, 3)') - self.assertTrue(check_tablespace(node, 'abc', 'test_space')) - - # check tablespace for appended partition - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended\')') - self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended\')') - self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) - - # check tablespace for prepended partition - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 41, 51, \'abc_added\')') - self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) - - # check tablespace for split - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added\', 45, \'abc_splitted\')') - self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) - - # now let's specify tablespace explicitly - node.psql( - 'postgres', - 'select append_range_partition(\'abc\', \'abc_appended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select prepend_range_partition(\'abc\', \'abc_prepended_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select add_range_partition(\'abc\', 61, 71, \'abc_added_2\', \'pg_default\')') - node.psql( - 'postgres', - 'select split_range_partition(\'abc_added_2\', 65, \'abc_splitted_2\', \'pg_default\')') - self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) - self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) - - @if_fdw_enabled - def test_foreign_table(self): - """Test foreign tables""" - - # Start master server - master = get_new_node('test') - master.init() - 
master.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - master.start() - master.psql('postgres', 'create extension pg_pathman') - master.psql('postgres', 'create extension postgres_fdw') - - # RANGE partitioning test with FDW: - # - create range partitioned table in master - # - create foreign server - # - create foreign table and insert some data into it - # - attach foreign table to partitioned one - # - try inserting data into foreign partition via parent - # - drop partitions - master.psql( - 'postgres', - '''create table abc(id serial, name text); - select create_range_partitions('abc', 'id', 0, 10, 2)''') - - # Current user name (needed for user mapping) - username = master.execute('postgres', 'select current_user')[0][0] - - # Start foreign server - fserv = get_new_node('fserv') - fserv.init().start() - fserv.safe_psql('postgres', 'create table ftable(id serial, name text)') - fserv.safe_psql('postgres', 'insert into ftable values (25, \'foreign\')') - - # Create foreign table and attach it to partitioned table - master.safe_psql( - 'postgres', - '''create server fserv - foreign data wrapper postgres_fdw - options (dbname 'postgres', host '127.0.0.1', port '{}')'''.format(fserv.port) - ) - master.safe_psql( - 'postgres', - '''create user mapping for {0} - server fserv - options (user '{0}')'''.format(username) - ) - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (ftable) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select attach_range_partition(\'abc\', \'ftable\', 20, 30)') - - # Check that table attached to partitioned table - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable'), - b'25|foreign\n' - ) - - # Check that we can successfully insert new data into foreign partition - master.safe_psql('postgres', 'insert into abc values (26, \'part\')') - self.assertEqual( - master.safe_psql('postgres', 'select * from ftable order by 
id'), - b'25|foreign\n26|part\n' - ) - - # Testing drop partitions (including foreign partitions) - master.safe_psql('postgres', 'select drop_partitions(\'abc\')') - - # HASH partitioning with FDW: - # - create hash partitioned table in master - # - create foreign table - # - replace local partition with foreign one - # - insert data - # - drop partitions - master.psql( - 'postgres', - '''create table hash_test(id serial, name text); - select create_hash_partitions('hash_test', 'id', 2)''') - fserv.safe_psql('postgres', 'create table f_hash_test(id serial, name text)') - - master.safe_psql( - 'postgres', - '''import foreign schema public limit to (f_hash_test) - from server fserv into public''' - ) - master.safe_psql( - 'postgres', - 'select replace_hash_partition(\'hash_test_1\', \'f_hash_test\')') - master.safe_psql('postgres', 'insert into hash_test select generate_series(1,10)') - - self.assertEqual( - master.safe_psql('postgres', 'select * from hash_test'), - b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n' - ) - master.safe_psql('postgres', 'select drop_partitions(\'hash_test\')') - - def test_parallel_nodes(self): - """Test parallel queries under partitions""" - - import json - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - 'shared_preload_libraries=\'pg_pathman, postgres_fdw\'\n') - node.start() - - # Check version of postgres server - # If version < 9.6 skip all tests for parallel queries - if version < 90600: - return - - # Prepare test database - node.psql('postgres', 'create extension pg_pathman') - node.psql('postgres', 'create table range_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table range_partitioned alter column i set not null') - node.psql('postgres', 'select create_range_partitions(\'range_partitioned\', \'i\', 1, 1e3::integer)') - node.psql('postgres', 'vacuum analyze range_partitioned') - - 
node.psql('postgres', 'create table hash_partitioned as select generate_series(1, 1e4::integer) i') - node.psql('postgres', 'alter table hash_partitioned alter column i set not null') - node.psql('postgres', 'select create_hash_partitions(\'hash_partitioned\', \'i\', 10)') - node.psql('postgres', 'vacuum analyze hash_partitioned') - - node.psql('postgres', """ - create or replace function query_plan(query text) returns jsonb as $$ - declare - plan jsonb; - begin - execute 'explain (costs off, format json)' || query into plan; - return plan; - end; - $$ language plpgsql; - """) - - # Test parallel select - with node.connect() as con: - con.execute('set max_parallel_workers_per_gather = 2') - if version >= 100000: - con.execute('set min_parallel_table_scan_size = 0') - else: - con.execute('set min_parallel_relation_size = 0') - con.execute('set parallel_setup_cost = 0') - con.execute('set parallel_tuple_cost = 0') - - # Check parallel aggregate plan - test_query = 'select count(*) from range_partitioned where i < 1500' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Finalize", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Aggregate", - "Strategy": "Plain", - "Partial Mode": "Partial", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": 
"range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check count of returned tuples - count = con.execute('select count(*) from range_partitioned where i < 1500')[0][0] - self.assertEqual(count, 1499) - - # Check simple parallel seq scan plan with limit - test_query = 'select * from range_partitioned where i < 1500 limit 5' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Limit", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Gather", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Workers Planned": 2, - "Single Copy": false, - "Plans": [ - { - "Node Type": "Append", - "Parent Relationship": "Outer", - "Parallel Aware": false, - "Plans": [ - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_2", - "Alias": "range_partitioned_2", - "Filter": "(i < 1500)" - }, - { - "Node Type": "Seq Scan", - "Parent Relationship": "Member", - "Parallel Aware": true, - "Relation Name": "range_partitioned_1", - "Alias": "range_partitioned_1" - } - ] - } - ] - } - ] - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Check tuples returned by query above - res_tuples = con.execute('select * from range_partitioned where i < 1500 limit 5') - res_tuples = sorted(map(lambda x: x[0], res_tuples)) - expected = [1, 2, 3, 4, 5] - self.assertEqual(res_tuples, expected) - - # Check the case when none partition is selected in result plan - test_query = 'select * from range_partitioned where i < 1' - plan = con.execute('select query_plan(\'%s\')' % test_query)[0][0] - expected = json.loads(""" - [ - { - "Plan": { - "Node Type": "Result", - "Parallel Aware": false, - "One-Time Filter": "false" - } - } - ] - """) - self.assertEqual(ordered(plan), ordered(expected)) - - # Remove 
all objects for testing - node.psql('postgres', 'drop table range_partitioned cascade') - node.psql('postgres', 'drop table hash_partitioned cascade') - node.psql('postgres', 'drop extension pg_pathman cascade') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_creation_insert(self): - """Test concurrent partition creation on INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute('insert into ins_test select generate_series(1, 50)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.execute('insert into ins_test values(51)') - con2.commit() - - # Step 1: lock partitioned table in con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - con1.execute('lock table ins_test in share update exclusive mode') - - # Step 2: try inserting new value in con2 (waiting) - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - t = threading.Thread(target=con2_thread) - t.start() - - # Step 3: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 4: try inserting new value in con1 (success, unlock) - con1.execute('insert into ins_test values(52)') - con1.commit() - - # Step 5: wait for con2 - t.join() - - rows = con1.execute(""" - select * from pathman_partition_list - where parent = 'ins_test'::regclass - order by range_min, range_max - """) - - # check number of partitions - 
self.assertEqual(len(rows), 6) - - # check range_max of partitions - self.assertEqual(int(rows[0][5]), 11) - self.assertEqual(int(rows[1][5]), 21) - self.assertEqual(int(rows[2][5]), 31) - self.assertEqual(int(rows[3][5]), 41) - self.assertEqual(int(rows[4][5]), 51) - self.assertEqual(int(rows[5][5]), 61) - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_conc_part_merge_insert(self): - """Test concurrent merge_range_partitions() + INSERT""" - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create table 'ins_test' and partition it - with node.connect() as con0: - con0.begin() - con0.execute('create table ins_test(val int not null)') - con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") - con0.commit() - - # Create two separate connections for this test - with node.connect() as con1, node.connect() as con2: - - # Thread for connection #2 (it has to wait) - def con2_thread(): - con2.begin() - con2.execute('insert into ins_test values(20)') - con2.commit() - - # Step 1: initilize con1 - con1.begin() - con1.execute('select count(*) from ins_test') # load pathman's cache - - # Step 2: initilize con2 - con2.begin() - con2.execute('select count(*) from ins_test') # load pathman's cache - con2.commit() # unlock relations - - # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) - con1.execute("select merge_range_partitions('ins_test_1', 'ins_test_2')") - - # Step 4: try inserting new value in con2 (waiting) - t = threading.Thread(target=con2_thread) - t.start() - - # Step 5: wait until 't' locks - while True: - with node.connect() as con0: - locks = con0.execute(""" - select count(*) from pg_locks where granted = 'f' - """) - - if int(locks[0][0]) > 0: - break - - # Step 6: finish merge in con1 (success, unlock) - con1.commit() - - # Step 7: wait for con2 - t.join() - - rows = con1.execute("select *, tableoid::regclass::text from ins_test") - - # check 
number of rows in table - self.assertEqual(len(rows), 1) - - # check value that has been inserted - self.assertEqual(int(rows[0][0]), 20) - - # check partition that was chosen for insert - self.assertEqual(str(rows[0][1]), 'ins_test_1') - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_pg_dump(self): - """ - Test using dump and restore of partitioned table through pg_dump and pg_restore tools. - - Test strategy: - - test range and hash partitioned tables; - - for each partitioned table check on restorable side the following quantities: - * constraints related to partitioning; - * init callback function and enable parent flag; - * number of rows in parent and child tables; - * plan validity of simple SELECT query under partitioned table; - - check dumping using the following parameters of pg_dump: - * format = plain | custom; - * using of inserts and copy. - - all test cases are carried out on tables half-full with data located in parent part, - the rest of data - in child tables. 
- """ - - import subprocess - - # Init and start postgres instance with preload pg_pathman module - node = get_new_node('test') - node.init() - node.append_conf( - 'postgresql.conf', - """ - shared_preload_libraries=\'pg_pathman\' - pg_pathman.override_copy=false - """) - node.start() - - # Init two databases: initial and copy - node.psql('postgres', 'create database initial') - node.psql('postgres', 'create database copy') - node.psql('initial', 'create extension pg_pathman') - - # Create and fillin partitioned table in initial database - with node.connect('initial') as con: - - # create and initailly fillin tables - con.execute('create table range_partitioned (i integer not null)') - con.execute('insert into range_partitioned select i from generate_series(1, 500) i') - con.execute('create table hash_partitioned (i integer not null)') - con.execute('insert into hash_partitioned select i from generate_series(1, 500) i') - - # partition table keeping data in base table - # enable_parent parameter automatically becames true - con.execute('select create_range_partitions(\'range_partitioned\', \'i\', 1, 200, partition_data := false)') - con.execute('select create_hash_partitions(\'hash_partitioned\', \'i\', 5, false)') - - # fillin child tables with remain data - con.execute('insert into range_partitioned select i from generate_series(501, 1000) i') - con.execute('insert into hash_partitioned select i from generate_series(501, 1000) i') - - # set init callback - con.execute(""" - create or replace function init_partition_stub_callback(args jsonb) - returns void as $$ - begin - end - $$ language plpgsql; - """) - con.execute('select set_init_callback(\'range_partitioned\', \'init_partition_stub_callback(jsonb)\')') - con.execute('select set_init_callback(\'hash_partitioned\', \'init_partition_stub_callback(jsonb)\')') - - # turn off enable_parent option - con.execute('select set_enable_parent(\'range_partitioned\', false)') - con.execute('select 
set_enable_parent(\'hash_partitioned\', false)') - - con.commit() - - # compare strategies - CMP_OK, PLANS_MISMATCH, CONTENTS_MISMATCH = range(3) - def cmp_full(con1, con2): - """Compare selection partitions in plan and contents in partitioned tables""" - - plan_query = 'explain (costs off, format json) select * from %s' - content_query = 'select * from %s order by i' - table_refs = [ - 'range_partitioned', - 'only range_partitioned', - 'hash_partitioned', - 'only hash_partitioned' - ] - for table_ref in table_refs: - plan_initial = con1.execute(plan_query % table_ref)[0][0][0]['Plan'] - plan_copy = con2.execute(plan_query % table_ref)[0][0][0]['Plan'] - if ordered(plan_initial) != ordered(plan_copy): - return PLANS_MISMATCH - - content_initial = [x[0] for x in con1.execute(content_query % table_ref)] - content_copy = [x[0] for x in con2.execute(content_query % table_ref)] - if content_initial != content_copy: - return CONTENTS_MISMATCH - - return CMP_OK - - def turnoff_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to off') - node.reload() - - def turnon_pathman(node): - node.psql('initial', 'alter system set pg_pathman.enable to on') - node.psql('copy', 'alter system set pg_pathman.enable to on') - node.psql('initial', 'alter system set pg_pathman.override_copy to off') - node.psql('copy', 'alter system set pg_pathman.override_copy to off') - node.reload() - - # Test dump/restore from init database to copy functionality - test_params = [ - (None, - None, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via COPY - (turnoff_pathman, - turnon_pathman, - [node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--inserts", - "initial"], - [node.get_bin_path("psql"), - "-p {}".format(node.port), - "copy"], - cmp_full), # dump as plain text and restore via INSERTs - (None, - None, - 
[node.get_bin_path("pg_dump"), - "-p {}".format(node.port), - "--format=custom", - "initial"], - [node.get_bin_path("pg_restore"), - "-p {}".format(node.port), - "--dbname=copy"], - cmp_full), # dump in archive format - ] - - try: - FNULL = open(os.devnull, 'w') - - for preproc, postproc, pg_dump_params, pg_restore_params, cmp_dbs in test_params: - - dump_restore_cmd = " | ".join((' '.join(pg_dump_params), ' '.join(pg_restore_params))) - - if (preproc != None): - preproc(node) - - # transfer and restore data - p1 = subprocess.Popen(pg_dump_params, stdout=subprocess.PIPE) - stdoutdata, _ = p1.communicate() - p2 = subprocess.Popen(pg_restore_params, stdin=subprocess.PIPE, - stdout=FNULL, stderr=FNULL) - p2.communicate(input=stdoutdata) - - if (postproc != None): - postproc(node) - - # check validity of data - with node.connect('initial') as con1, node.connect('copy') as con2: - - # compare plans and contents of initial and copy - cmp_result = cmp_dbs(con1, con2) - self.assertNotEqual(cmp_result, PLANS_MISMATCH, - "mismatch in plans of select query on partitioned tables under the command: %s" % dump_restore_cmd) - self.assertNotEqual(cmp_result, CONTENTS_MISMATCH, - "mismatch in contents of partitioned tables under the command: %s" % dump_restore_cmd) - - # compare enable_parent flag and callback function - config_params_query = """ - select partrel, enable_parent, init_callback from pathman_config_params - """ - config_params_initial, config_params_copy = {}, {} - for row in con1.execute(config_params_query): - config_params_initial[row[0]] = row[1:] - for row in con2.execute(config_params_query): - config_params_copy[row[0]] = row[1:] - self.assertEqual(config_params_initial, config_params_copy, \ - "mismatch in pathman_config_params under the command: %s" % dump_restore_cmd) - - # compare constraints on each partition - constraints_query = """ - select r.relname, c.conname, c.consrc from - pg_constraint c join pg_class r on c.conrelid=r.oid - where relname similar 
to '(range|hash)_partitioned_\d+' - """ - constraints_initial, constraints_copy = {}, {} - for row in con1.execute(constraints_query): - constraints_initial[row[0]] = row[1:] - for row in con2.execute(constraints_query): - constraints_copy[row[0]] = row[1:] - self.assertEqual(constraints_initial, constraints_copy, \ - "mismatch in partitions' constraints under the command: %s" % dump_restore_cmd) - - # clear copy database - node.psql('copy', 'drop schema public cascade') - node.psql('copy', 'create schema public') - node.psql('copy', 'drop extension pg_pathman cascade') - - except: - raise - finally: - FNULL.close() - - # Stop instance and finish work - node.stop() - node.cleanup() - - def test_concurrent_detach(self): - """Test concurrent detach partition with contiguous tuple inserting and spawning new partitions""" - - # Init parameters - num_insert_workers = 8 - detach_timeout = 0.1 # time in sec between successive inserts and detachs - num_detachs = 100 # estimated number of detachs - inserts_advance = 1 # abvance in sec of inserts process under detachs - test_interval = int(math.ceil(detach_timeout * num_detachs)) - - insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/insert_current_timestamp.pgbench" - detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ - + "/pgbench_scripts/detachs_in_timeout.pgbench" - - # Check pgbench scripts on existance - self.assertTrue(os.path.isfile(insert_pgbench_script), - msg="pgbench script with insert timestamp doesn't exist") - self.assertTrue(os.path.isfile(detach_pgbench_script), - msg="pgbench script with detach letfmost partition doesn't exist") - - # Create and start new instance - node = self.start_new_pathman_cluster(allows_streaming=False) - - # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec - with node.connect() as con0: - con0.begin() - con0.execute('create table ts_range_partitioned(ts timestamp not null)') - 
con0.execute("select create_range_partitions('ts_range_partitioned', 'ts', current_timestamp, interval '%f', 1)" % detach_timeout) - con0.commit() - - # Run in background inserts and detachs processes - FNULL = open(os.devnull, 'w') - - # init pgbench's utility tables - init_pgbench = node.pgbench(stdout=FNULL, stderr=FNULL, options=["-i"]) - init_pgbench.wait() - - inserts = node.pgbench(stdout=FNULL, stderr=subprocess.PIPE, options=[ - "-j", "%i" % num_insert_workers, - "-c", "%i" % num_insert_workers, - "-f", insert_pgbench_script, - "-T", "%i" % (test_interval+inserts_advance) - ]) - time.sleep(inserts_advance) - detachs = node.pgbench(stdout=FNULL, stderr=FNULL, options=[ - "-D", "timeout=%f" % detach_timeout, - "-f", detach_pgbench_script, - "-T", "%i" % test_interval - ]) - - # Wait for completion of processes - _, stderrdata = inserts.communicate() - detachs.wait() - - # Obtain error log from inserts process - self.assertIsNone(re.search("ERROR|FATAL|PANIC", str(stderrdata)), - msg="Race condition between detach and concurrent inserts with append partition is expired") - - # Stop instance and finish work - node.stop() - node.cleanup() - FNULL.close() +def ordered(obj, skip_keys=None): + if isinstance(obj, dict): + return sorted((k, ordered(v, skip_keys=skip_keys)) for k, v in obj.items() + if skip_keys is None or (skip_keys and k not in skip_keys)) + if isinstance(obj, list): + return sorted(ordered(x, skip_keys=skip_keys) for x in obj) + else: + return obj + + +# Check if postgres_fdw is available +@functools.lru_cache(maxsize=1) +def is_postgres_fdw_ready(): + with get_new_node().init().start() as node: + result = node.execute(""" + select count(*) from pg_available_extensions where name = 'postgres_fdw' + """) + + return result[0][0] > 0 + + +class Tests(unittest.TestCase): + def set_trace(self, con, command="pg_debug"): + pid = con.execute("select pg_backend_pid()")[0][0] + p = subprocess.Popen([command], stdin=subprocess.PIPE) + 
p.communicate(str(pid).encode()) + + def start_new_pathman_cluster(self, + allow_streaming=False, + test_data=False, + enable_partitionrouter=False): + + node = get_new_node() + node.init(allow_streaming=allow_streaming) + node.append_conf("shared_preload_libraries='pg_pathman'\n") + if enable_partitionrouter: + node.append_conf("pg_pathman.enable_partitionrouter=on\n") + + node.start() + node.psql('create extension pg_pathman') + + if test_data: + node.safe_psql(""" + create table abc(id serial, t text); + insert into abc select generate_series(1, 300000); + select create_hash_partitions('abc', 'id', 3, partition_data := false); + """) + + node.safe_psql('vacuum analyze') + + return node + + def test_concurrent(self): + """ Test concurrent partitioning """ + + with self.start_new_pathman_cluster(test_data=True) as node: + node.psql("select partition_table_concurrently('abc')") + + while True: + # update some rows to check for deadlocks + node.safe_psql(""" + update abc set t = 'test' + where id in (select (random() * 300000)::int + from generate_series(1, 3000)) + """) + + count = node.execute(""" + select count(*) from pathman_concurrent_part_tasks + """) + + # if there is no active workers then it means work is done + if count[0][0] == 0: + break + time.sleep(1) + + data = node.execute('select count(*) from only abc') + self.assertEqual(data[0][0], 0) + data = node.execute('select count(*) from abc') + self.assertEqual(data[0][0], 300000) + node.stop() + + def test_replication(self): + """ Test how pg_pathman works with replication """ + + with self.start_new_pathman_cluster(allow_streaming=True, test_data=True) as node: + with node.replicate() as replica: + replica.start() + replica.catchup() + + # check that results are equal + self.assertEqual( + node.psql('explain (costs off) select * from abc'), + replica.psql('explain (costs off) select * from abc')) + + # enable parent and see if it is enabled in replica + node.psql("select enable_parent('abc')") + + # 
wait until replica catches up + replica.catchup() + + self.assertEqual( + node.psql('explain (costs off) select * from abc'), + replica.psql('explain (costs off) select * from abc')) + self.assertEqual( + node.psql('select * from abc'), + replica.psql('select * from abc')) + self.assertEqual( + node.execute('select count(*) from abc')[0][0], 300000) + + # check that UPDATE in pathman_config_params invalidates cache + node.psql('update pathman_config_params set enable_parent = false') + + # wait until replica catches up + replica.catchup() + + self.assertEqual( + node.psql('explain (costs off) select * from abc'), + replica.psql('explain (costs off) select * from abc')) + self.assertEqual( + node.psql('select * from abc'), + replica.psql('select * from abc')) + self.assertEqual( + node.execute('select count(*) from abc')[0][0], 0) + + def test_locks(self): + """ + Test that a session trying to create new partitions + waits for other sessions if they are doing the same + """ + + class Flag: + def __init__(self, value): + self.flag = value + + def set(self, value): + self.flag = value + + def get(self): + return self.flag + + # There is one flag for each thread which shows if thread have done its work + flags = [Flag(False) for i in range(3)] + + # All threads synchronize though this lock + lock = threading.Lock() + + # Define thread function + def add_partition(node, flag, query): + """ + We expect that this query will wait until + another session commits or rolls back + """ + node.safe_psql(query) + with lock: + flag.set(True) + + # Initialize master server + with get_new_node() as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'") + node.start() + + node.safe_psql(""" + create extension pg_pathman; + create table abc(id serial, t text); + insert into abc select generate_series(1, 100000); + select create_range_partitions('abc', 'id', 1, 50000); + """) + + # Start transaction that will create partition + with node.connect() as con: + 
con.begin() + con.execute("select append_range_partition('abc')") + + # Start threads that suppose to add new partitions and wait some + # time + query = ( + "select prepend_range_partition('abc')", + "select append_range_partition('abc')", + "select add_range_partition('abc', 500000, 550000)", + ) + + threads = [] + for i in range(3): + thread = threading.Thread( + target=add_partition, args=(node, flags[i], query[i])) + threads.append(thread) + thread.start() + time.sleep(3) + + # These threads should wait until current transaction finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), False) + + # Commit transaction. Since then other sessions can create + # partitions + con.commit() + + # Now wait until each thread finishes + for thread in threads: + thread.join() + + # Check flags, it should be true which means that threads are + # finished + with lock: + for i in range(3): + self.assertEqual(flags[i].get(), True) + + # Check that all partitions are created + self.assertEqual( + node.safe_psql( + "select count(*) from pg_inherits where inhparent='abc'::regclass"), + b'6\n') + + def test_tablespace(self): + """ Check tablespace support """ + + def check_tablespace(node, tablename, tablespace): + res = node.execute("select get_tablespace('{}')".format(tablename)) + if len(res) == 0: + return False + + return res[0][0] == tablespace + + with get_new_node() as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'") + node.start() + node.psql('create extension pg_pathman') + + # create tablespace + path = os.path.join(node.data_dir, 'test_space_location') + os.mkdir(path) + node.psql("create tablespace test_space location '{}'".format(path)) + + # create table in this tablespace + node.psql('create table abc(a serial, b int) tablespace test_space') + + # create three partitions. 
Excpect that they will be created in the + # same tablespace as the parent table + node.psql("select create_range_partitions('abc', 'a', 1, 10, 3)") + self.assertTrue(check_tablespace(node, 'abc', 'test_space')) + + # check tablespace for appended partition + node.psql("select append_range_partition('abc', 'abc_appended')") + self.assertTrue(check_tablespace(node, 'abc_appended', 'test_space')) + + # check tablespace for prepended partition + node.psql("select prepend_range_partition('abc', 'abc_prepended')") + self.assertTrue(check_tablespace(node, 'abc_prepended', 'test_space')) + + # check tablespace for prepended partition + node.psql("select add_range_partition('abc', 41, 51, 'abc_added')") + self.assertTrue(check_tablespace(node, 'abc_added', 'test_space')) + + # check tablespace for split + node.psql("select split_range_partition('abc_added', 45, 'abc_splitted')") + self.assertTrue(check_tablespace(node, 'abc_splitted', 'test_space')) + + # now let's specify tablespace explicitly + node.psql( + "select append_range_partition('abc', 'abc_appended_2', 'pg_default')" + ) + node.psql( + "select prepend_range_partition('abc', 'abc_prepended_2', 'pg_default')" + ) + node.psql( + "select add_range_partition('abc', 61, 71, 'abc_added_2', 'pg_default')" + ) + node.psql( + "select split_range_partition('abc_added_2', 65, 'abc_splitted_2', 'pg_default')" + ) + + # yapf: disable + self.assertTrue(check_tablespace(node, 'abc_appended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_prepended_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_added_2', 'pg_default')) + self.assertTrue(check_tablespace(node, 'abc_splitted_2', 'pg_default')) + + @unittest.skipUnless(is_postgres_fdw_ready(), 'FDW might be missing') + def test_foreign_table(self): + """ Test foreign tables """ + + # Start master server + with get_new_node() as master, get_new_node() as fserv: + master.init() + master.append_conf(""" + shared_preload_libraries='pg_pathman, 
postgres_fdw'\n + """) + master.start() + master.psql('create extension pg_pathman') + master.psql('create extension postgres_fdw') + + # RANGE partitioning test with FDW: + # - create range partitioned table in master + # - create foreign server + # - create foreign table and insert some data into it + # - attach foreign table to partitioned one + # - try inserting data into foreign partition via parent + # - drop partitions + master.psql(""" + create table abc(id serial, name text); + select create_range_partitions('abc', 'id', 0, 10, 2) + """) + + # Current user name (needed for user mapping) + username = master.execute('select current_user')[0][0] + + fserv.init().start() + fserv.safe_psql("create table ftable(id serial, name text)") + fserv.safe_psql("insert into ftable values (25, 'foreign')") + + # Create foreign table and attach it to partitioned table + master.safe_psql(""" + create server fserv + foreign data wrapper postgres_fdw + options (dbname 'postgres', host '127.0.0.1', port '{}') + """.format(fserv.port)) + + master.safe_psql(""" + create user mapping for {0} server fserv + options (user '{0}') + """.format(username)) + + master.safe_psql(""" + import foreign schema public limit to (ftable) + from server fserv into public + """) + + master.safe_psql( + "select attach_range_partition('abc', 'ftable', 20, 30)") + + # Check that table attached to partitioned table + self.assertEqual( + master.safe_psql('select * from ftable'), + b'25|foreign\n') + + # Check that we can successfully insert new data into foreign partition + master.safe_psql("insert into abc values (26, 'part')") + self.assertEqual( + master.safe_psql('select * from ftable order by id'), + b'25|foreign\n26|part\n') + + # Testing drop partitions (including foreign partitions) + master.safe_psql("select drop_partitions('abc')") + + # HASH partitioning with FDW: + # - create hash partitioned table in master + # - create foreign table + # - replace local partition with foreign one + # - 
insert data + # - drop partitions + master.psql(""" + create table hash_test(id serial, name text); + select create_hash_partitions('hash_test', 'id', 2) + """) + fserv.safe_psql('create table f_hash_test(id serial, name text)') + + master.safe_psql(""" + import foreign schema public limit to (f_hash_test) + from server fserv into public + """) + master.safe_psql(""" + select replace_hash_partition('hash_test_1', 'f_hash_test') + """) + master.safe_psql('insert into hash_test select generate_series(1,10)') + + self.assertEqual( + master.safe_psql('select * from hash_test'), + b'1|\n2|\n5|\n6|\n8|\n9|\n3|\n4|\n7|\n10|\n') + master.safe_psql("select drop_partitions('hash_test')") + + @unittest.skipUnless(is_postgres_fdw_ready(), 'FDW might be missing') + def test_parallel_nodes(self): + """ Test parallel queries under partitions """ + + # Init and start postgres instance with preload pg_pathman module + with get_new_node() as node: + node.init() + node.append_conf( + "shared_preload_libraries='pg_pathman, postgres_fdw'") + node.start() + + # Check version of postgres server + # If version < 9.6 skip all tests for parallel queries + if version < Version('9.6.0'): + return + + # Prepare test database + node.psql('create extension pg_pathman') + node.psql(""" + create table range_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table range_partitioned alter column i set not null; + select create_range_partitions('range_partitioned', 'i', 1, 1e3::integer); + + create table hash_partitioned as + select generate_series(1, 1e4::integer) i; + + alter table hash_partitioned alter column i set not null; + select create_hash_partitions('hash_partitioned', 'i', 10); + """) + + # create statistics for both partitioned tables + node.psql('vacuum analyze') + + node.psql(""" + create or replace function query_plan(query text) + returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + 
end; + $$ language plpgsql; + """) + + # Test parallel select + with node.connect() as con: + con.execute('set max_parallel_workers_per_gather = 2') + if version >= Version('10'): + con.execute('set min_parallel_table_scan_size = 0') + else: + con.execute('set min_parallel_relation_size = 0') + con.execute('set parallel_setup_cost = 0') + con.execute('set parallel_tuple_cost = 0') + + # Check parallel aggregate plan + test_query = 'select count(*) from range_partitioned where i < 1500' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Finalize", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Aggregate", + "Strategy": "Plain", + "Partial Mode": "Partial", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed', 'Async Capable']), ordered(expected)) + + # Check count of returned tuples + count = con.execute( + 'select count(*) from range_partitioned where i < 1500')[0][0] + self.assertEqual(count, 1499) + + # Check simple parallel seq scan plan with limit + test_query = 'select * from range_partitioned where i < 1500 limit 5' + plan = con.execute("select query_plan('%s')" % 
test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Limit", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Gather", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Workers Planned": 2, + "Single Copy": false, + "Plans": [ + { + "Node Type": "Append", + "Parent Relationship": "Outer", + "Parallel Aware": false, + "Plans": [ + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_2", + "Alias": "range_partitioned_2", + "Filter": "(i < 1500)" + }, + { + "Node Type": "Seq Scan", + "Parent Relationship": "Member", + "Parallel Aware": true, + "Relation Name": "range_partitioned_1", + "Alias": "range_partitioned_1" + } + ] + } + ] + } + ] + } + } + ] + """) + self.assertEqual(ordered(plan, skip_keys=['Subplans Removed', 'Async Capable']), ordered(expected)) + + # Check tuples returned by query above + res_tuples = con.execute( + 'select * from range_partitioned where i < 1500 limit 5') + res_tuples = sorted(map(lambda x: x[0], res_tuples)) + expected = [1, 2, 3, 4, 5] + self.assertEqual(res_tuples, expected) + + # Check the case when none partition is selected in result plan + test_query = 'select * from range_partitioned where i < 1' + plan = con.execute("select query_plan('%s')" % test_query)[0][0] + expected = json.loads(""" + [ + { + "Plan": { + "Node Type": "Result", + "Parallel Aware": false, + "One-Time Filter": "false" + } + } + ] + """) + self.assertEqual(ordered(plan, skip_keys=['Async Capable']), ordered(expected)) + + # Remove all objects for testing + node.psql('drop table range_partitioned cascade') + node.psql('drop table hash_partitioned cascade') + node.psql('drop extension pg_pathman cascade') + + def test_conc_part_drop_runtime_append(self): + """ Test concurrent partition drop + SELECT (RuntimeAppend) """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create 
table 'drop_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table drop_test(val int not null)") + con0.execute("insert into drop_test select generate_series(1, 1000)") + con0.execute("select create_range_partitions('drop_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + try: + from queue import Queue + except ImportError: + from Queue import Queue + + # return values from thread + queue = Queue() + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('set enable_hashjoin = f') + con2.execute('set enable_mergejoin = f') + + res = con2.execute(""" + explain (analyze, costs off, timing off) + select * from drop_test + where val = any (select generate_series(22, 40, 13)) + """) # query selects from drop_test_3 and drop_test_4 + + con2.commit() + + has_runtime_append = False + has_drop_test_3 = False + has_drop_test_4 = False + + for row in res: + if row[0].find('RuntimeAppend') >= 0: + has_runtime_append = True + continue + + if row[0].find('drop_test_3') >= 0: + has_drop_test_3 = True + continue + + if row[0].find('drop_test_4') >= 0: + has_drop_test_4 = True + continue + + # return all values in tuple + queue.put((has_runtime_append, has_drop_test_3, has_drop_test_4)) + + # Step 1: cache partitioned table in con1 + con1.begin() + con1.execute('select count(*) from drop_test') # load pathman's cache + con1.commit() + + # Step 2: cache partitioned table in con2 + con2.begin() + con2.execute('select count(*) from drop_test') # load pathman's cache + con2.commit() + + # Step 3: drop first partition of 'drop_test' + con1.begin() + con1.execute('drop table drop_test_3') + + # Step 4: try executing select (RuntimeAppend) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with node.connect() as con0: + locks = 
con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: commit 'DROP TABLE' + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'drop_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 99) + + # check RuntimeAppend + selected partitions + (has_runtime_append, has_drop_test_3, has_drop_test_4) = queue.get() + self.assertTrue(has_runtime_append) + self.assertFalse(has_drop_test_3) + self.assertTrue(has_drop_test_4) + + def test_conc_part_creation_insert(self): + """ Test concurrent partition creation on INSERT """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("insert into ins_test select generate_series(1, 50)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.execute('insert into ins_test values(51)') + con2.commit() + + # Step 1: lock partitioned table in con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + con1.execute('lock table ins_test in share update exclusive mode') + + # Step 2: try inserting new value in con2 (waiting) + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + t = threading.Thread(target=con2_thread) + t.start() + + # Step 3: wait until 't' locks + while True: + with node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if 
int(locks[0][0]) > 0: + break + + # Step 4: try inserting new value in con1 (success, unlock) + con1.execute('insert into ins_test values(52)') + con1.commit() + + # Step 5: wait for con2 + t.join() + + rows = con1.execute(""" + select * from pathman_partition_list + where parent = 'ins_test'::regclass + order by range_min, range_max + """) + + # check number of partitions + self.assertEqual(len(rows), 6) + + # check range_max of partitions + self.assertEqual(int(rows[0][5]), 11) + self.assertEqual(int(rows[1][5]), 21) + self.assertEqual(int(rows[2][5]), 31) + self.assertEqual(int(rows[3][5]), 41) + self.assertEqual(int(rows[4][5]), 51) + self.assertEqual(int(rows[5][5]), 61) + + def test_conc_part_merge_insert(self): + """ Test concurrent merge_range_partitions() + INSERT """ + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create table 'ins_test' and partition it + with node.connect() as con0: + # yapf: disable + con0.begin() + con0.execute("create table ins_test(val int not null)") + con0.execute("select create_range_partitions('ins_test', 'val', 1, 10, 10)") + con0.commit() + + # Create two separate connections for this test + with node.connect() as con1, node.connect() as con2: + + # Thread for connection #2 (it has to wait) + def con2_thread(): + con2.begin() + con2.execute('insert into ins_test values(20)') + con2.commit() + + # Step 1: initilize con1 + con1.begin() + con1.execute('select count(*) from ins_test') # load pathman's cache + + # Step 2: initilize con2 + con2.begin() + con2.execute('select count(*) from ins_test') # load pathman's cache + con2.commit() # unlock relations + + # Step 3: merge 'ins_test1' + 'ins_test_2' in con1 (success) + con1.execute( + "select merge_range_partitions('ins_test_1', 'ins_test_2')") + + # Step 4: try inserting new value in con2 (waiting) + t = threading.Thread(target=con2_thread) + t.start() + + # Step 5: wait until 't' locks + while True: + with 
node.connect() as con0: + locks = con0.execute(""" + select count(*) from pg_locks where granted = 'f' + """) + + if int(locks[0][0]) > 0: + break + + # Step 6: finish merge in con1 (success, unlock) + con1.commit() + + # Step 7: wait for con2 + t.join() + + rows = con1.execute("select *, tableoid::regclass::text from ins_test") + + # check number of rows in table + self.assertEqual(len(rows), 1) + + # check value that has been inserted + self.assertEqual(int(rows[0][0]), 20) + + # check partition that was chosen for insert + self.assertEqual(str(rows[0][1]), 'ins_test_1') + + def test_pg_dump(self): + with self.start_new_pathman_cluster() as node: + node.safe_psql('create database copy') + + node.safe_psql(""" + create table test_hash(val int not null); + select create_hash_partitions('test_hash', 'val', 10); + insert into test_hash select generate_series(1, 90); + + create table test_range(val int not null); + select create_range_partitions('test_range', 'val', 1, 10, 10); + insert into test_range select generate_series(1, 95); + """) + + dump = node.dump() + node.restore(dbname='copy', filename=dump) + os.remove(dump) + + # HASH + a = node.execute('postgres', 'select * from test_hash order by val') + b = node.execute('copy', 'select * from test_hash order by val') + self.assertEqual(a, b) + c = node.execute('postgres', 'select * from only test_hash order by val') + d = node.execute('copy', 'select * from only test_hash order by val') + self.assertEqual(c, d) + + # RANGE + a = node.execute('postgres', 'select * from test_range order by val') + b = node.execute('copy', 'select * from test_range order by val') + self.assertEqual(a, b) + c = node.execute('postgres', 'select * from only test_range order by val') + d = node.execute('copy', 'select * from only test_range order by val') + self.assertEqual(c, d) + + # check partition sets + p1 = node.execute('postgres', 'select * from pathman_partition_list') + p2 = node.execute('copy', 'select * from 
pathman_partition_list') + self.assertEqual(sorted(p1), sorted(p2)) + + def test_concurrent_detach(self): + """ + Test concurrent detach partition with contiguous + tuple inserting and spawning new partitions + """ + + # Init parameters + num_insert_workers = 8 + detach_timeout = 0.1 # time in sec between successive inserts and detachs + num_detachs = 100 # estimated number of detachs + inserts_advance = 1 # abvance in sec of inserts process under detachs + test_interval = int(math.ceil(detach_timeout * num_detachs)) + + insert_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/insert_current_timestamp.pgbench" + detach_pgbench_script = os.path.dirname(os.path.realpath(__file__)) \ + + "/pgbench_scripts/detachs_in_timeout.pgbench" + + # Check pgbench scripts on existance + self.assertTrue( + os.path.isfile(insert_pgbench_script), + msg="pgbench script with insert timestamp doesn't exist") + + self.assertTrue( + os.path.isfile(detach_pgbench_script), + msg="pgbench script with detach letfmost partition doesn't exist") + + # Create and start new instance + with self.start_new_pathman_cluster(allow_streaming=False) as node: + # Create partitioned table for testing that spawns new partition on each next *detach_timeout* sec + with node.connect() as con0: + con0.begin() + con0.execute( + 'create table ts_range_partitioned(ts timestamp not null)') + + # yapf: disable + con0.execute(""" + select create_range_partitions('ts_range_partitioned', + 'ts', + current_timestamp, + interval '%f', + 1) + """ % detach_timeout) + con0.commit() + + # Run in background inserts and detachs processes + with open(os.devnull, 'w') as fnull: + # init pgbench's utility tables + init_pgbench = node.pgbench(stdout=fnull, stderr=fnull, options=["-i"]) + init_pgbench.wait() + + inserts = node.pgbench( + stdout=fnull, + stderr=subprocess.PIPE, + options=[ + "-j", + "%i" % num_insert_workers, "-c", + "%i" % num_insert_workers, "-f", insert_pgbench_script, "-T", + 
"%i" % (test_interval + inserts_advance) + ]) + time.sleep(inserts_advance) + detachs = node.pgbench( + stdout=fnull, + stderr=fnull, + options=[ + "-D", + "timeout=%f" % detach_timeout, "-f", detach_pgbench_script, + "-T", + "%i" % test_interval + ]) + + # Wait for completion of processes + _, stderrdata = inserts.communicate() + detachs.wait() + + # Obtain error log from inserts process + self.assertIsNone( + re.search("ERROR|FATAL|PANIC", str(stderrdata)), + msg=""" + Race condition between detach and concurrent + inserts with append partition is expired + """) + + def test_update_node_plan1(self): + ''' + Test scan on all partititions when using update node. + We can't use regression tests here because 9.5 and 9.6 give + different plans + ''' + + with get_new_node('test_update_node') as node: + node.init() + node.append_conf('postgresql.conf', """ + shared_preload_libraries=\'pg_pathman\' + pg_pathman.override_copy=false + pg_pathman.enable_partitionrouter=on + """) + node.start() + + # Prepare test database + node.psql('postgres', 'CREATE EXTENSION pg_pathman;') + node.psql('postgres', 'CREATE SCHEMA test_update_node;') + node.psql('postgres', 'CREATE TABLE test_update_node.test_range(val NUMERIC NOT NULL, comment TEXT)') + node.psql('postgres', 'INSERT INTO test_update_node.test_range SELECT i, i FROM generate_series(1, 100) i;') + node.psql('postgres', "SELECT create_range_partitions('test_update_node.test_range', 'val', 1, 10);") + + node.psql('postgres', """ + create or replace function query_plan(query text) returns jsonb as $$ + declare + plan jsonb; + begin + execute 'explain (costs off, format json)' || query into plan; + return plan; + end; + $$ language plpgsql; + """) + + with node.connect() as con: + test_query = "UPDATE test_update_node.test_range SET val = 14 WHERE comment=''15''" + plan = con.execute('SELECT query_plan(\'%s\')' % test_query)[0][0] + plan = plan[0]["Plan"] + + # PartitionOverseer + self.assertEqual(plan["Node Type"], "Custom 
Scan") + self.assertEqual(plan["Custom Plan Provider"], 'PartitionOverseer') + + # ModifyTable + plan = plan["Plans"][0] + self.assertEqual(plan["Node Type"], "ModifyTable") + self.assertEqual(plan["Operation"], "Update") + self.assertEqual(plan["Relation Name"], "test_range") + self.assertEqual(len(plan["Target Tables"]), 11) + + # Plan was seriously changed in vanilla since v14 + if version < Version('14'): + expected_format = ''' + { + "Plans": [ + { + "Plans": [ + { + "Filter": "(comment = '15'::text)", + "Node Type": "Seq Scan", + "Relation Name": "test_range%s", + "Parent Relationship": "child" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "child", + "Custom Plan Provider": "PartitionRouter" + } + ], + "Node Type": "Custom Scan", + "Parent Relationship": "Member", + "Custom Plan Provider": "PartitionFilter" + } + ''' + + for i, f in enumerate([''] + list(map(str, range(1, 10)))): + num = '_' + f if f else '' + expected = json.loads(expected_format % num) + p = ordered(plan["Plans"][i], skip_keys=['Parallel Aware', 'Alias']) + self.assertEqual(p, ordered(expected)) + + node.psql('postgres', 'DROP SCHEMA test_update_node CASCADE;') + node.psql('postgres', 'DROP EXTENSION pg_pathman CASCADE;') + + def test_concurrent_updates(self): + ''' + Test whether conncurrent updates work correctly between + partitions. 
+ ''' + + create_sql = ''' + CREATE TABLE test1(id INT, b INT NOT NULL); + INSERT INTO test1 + SELECT i, i FROM generate_series(1, 100) i; + SELECT create_range_partitions('test1', 'b', 1, 5); + ''' + + with self.start_new_pathman_cluster(enable_partitionrouter=True) as node: + node.safe_psql(create_sql) + + pool = multiprocessing.Pool(processes=4) + for count in range(1, 200): + pool.apply_async(make_updates, (node, count, )) + + pool.close() + pool.join() + + # check all data is there and not duplicated + with node.connect() as con: + for i in range(1, 100): + row = con.execute("select count(*) from test1 where id = %d" % i)[0] + self.assertEqual(row[0], 1) + + self.assertEqual(node.execute("select count(*) from test1")[0][0], 100) + + +def make_updates(node, count): + update_sql = ''' + BEGIN; + UPDATE test1 SET b = trunc(random() * 100 + 1) WHERE id in (%s); + COMMIT; + ''' + + with node.connect() as con: + for i in range(count): + rows_to_update = random.randint(20, 50) + ids = set([str(random.randint(1, 100)) for i in range(rows_to_update)]) + con.execute(update_sql % ','.join(ids)) if __name__ == "__main__": - unittest.main() + if len(sys.argv) > 1: + suite = unittest.TestLoader().loadTestsFromName(sys.argv[1], + module=sys.modules[__name__]) + else: + suite = unittest.TestLoader().loadTestsFromTestCase(Tests) + + configure_testgres(use_python_logging=True) + result = unittest.TextTestRunner(verbosity=2, failfast=True).run(suite) + if not result.wasSuccessful(): + sys.exit(1) diff --git a/tests/update/README.md b/tests/update/README.md index f31f4116..fd042822 100644 --- a/tests/update/README.md +++ b/tests/update/README.md @@ -9,3 +9,9 @@ PG_CONFIG=... ./dump_pathman_objects %DBNAME% diff file_1 file_2 ``` + +check_update.py script tries to verify that update is ok automatically. 
For +instance, +```bash +tests/update/check_update.py d34a77e worktree +``` diff --git a/tests/update/check_update.py b/tests/update/check_update.py new file mode 100755 index 00000000..4bd740f6 --- /dev/null +++ b/tests/update/check_update.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python3 +#coding: utf-8 + +import shutil +import os +import contextlib +import sys +import argparse +import testgres +import subprocess +import time + +my_dir = os.path.dirname(os.path.abspath(__file__)) +repo_dir = os.path.abspath(os.path.join(my_dir, '../../')) +print(repo_dir) + +# just bunch of tables to create +run_sql = ''' +CREATE EXTENSION pg_pathman; + +CREATE TABLE hash_rel ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO hash_rel VALUES (1, 1); +INSERT INTO hash_rel VALUES (2, 2); +INSERT INTO hash_rel VALUES (3, 3); + +SELECT create_hash_partitions('hash_rel', 'Value', 3); + +CREATE TABLE range_rel ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP not null, + txt TEXT); +CREATE INDEX ON range_rel (dt); +INSERT INTO range_rel (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-04-30', '1 day'::interval) as g; +SELECT create_range_partitions('range_rel', 'DT', '2015-01-01'::DATE, '1 month'::INTERVAL); + +CREATE TABLE num_range_rel ( + id SERIAL PRIMARY KEY, + txt TEXT); +SELECT create_range_partitions('num_range_rel', 'id', 0, 1000, 4); +INSERT INTO num_range_rel + SELECT g, md5(g::TEXT) FROM generate_series(1, 3000) as g; + +CREATE TABLE improved_dummy_test1 (id BIGSERIAL, name TEXT NOT NULL); +INSERT INTO improved_dummy_test1 (name) SELECT md5(g::TEXT) FROM generate_series(1, 100) as g; +SELECT create_range_partitions('improved_dummy_test1', 'id', 1, 10); +INSERT INTO improved_dummy_test1 (name) VALUES ('test'); /* spawns new partition */ +ALTER TABLE improved_dummy_test1 ADD CHECK (name != 'ib'); + +CREATE TABLE test_improved_dummy_test2 (val INT NOT NULL); +SELECT create_range_partitions('test_improved_dummy_test2', 'val', + 
generate_range_bounds(1, 1, 2), + partition_names := '{p1, p2}'); + +CREATE TABLE insert_into_select(val int NOT NULL); +INSERT INTO insert_into_select SELECT generate_series(1, 100); +SELECT create_range_partitions('insert_into_select', 'val', 1, 20); +CREATE TABLE insert_into_select_copy (LIKE insert_into_select); /* INSERT INTO ... SELECT ... */ + +-- just a lot of actions + +SELECT split_range_partition('num_range_rel_1', 500); +SELECT split_range_partition('range_rel_1', '2015-01-15'::DATE); + +/* Merge two partitions into one */ +SELECT merge_range_partitions('num_range_rel_1', 'num_range_rel_' || currval('num_range_rel_seq')); +SELECT merge_range_partitions('range_rel_1', 'range_rel_' || currval('range_rel_seq')); + +/* Append and prepend partitions */ +SELECT append_range_partition('num_range_rel'); +SELECT prepend_range_partition('num_range_rel'); +SELECT drop_range_partition('num_range_rel_7'); + +SELECT drop_range_partition_expand_next('num_range_rel_4'); +SELECT drop_range_partition_expand_next('num_range_rel_6'); + +SELECT append_range_partition('range_rel'); +SELECT prepend_range_partition('range_rel'); +SELECT drop_range_partition('range_rel_7'); +SELECT add_range_partition('range_rel', '2014-12-01'::DATE, '2015-01-01'::DATE); + +CREATE TABLE range_rel_minus_infinity (LIKE range_rel INCLUDING ALL); +SELECT attach_range_partition('range_rel', 'range_rel_minus_infinity', NULL, '2014-12-01'::DATE); +INSERT INTO range_rel (dt) VALUES ('2012-06-15'); +INSERT INTO range_rel (dt) VALUES ('2015-12-15'); + +CREATE TABLE hash_rel_extern (LIKE hash_rel INCLUDING ALL); +SELECT replace_hash_partition('hash_rel_0', 'hash_rel_extern'); + +-- automatic partitions creation +CREATE TABLE range_rel_test1 ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + data TEXT); +SELECT create_range_partitions('range_rel_test1', 'dt', '2015-01-01'::DATE, '10 days'::INTERVAL, 1); +INSERT INTO range_rel_test1 (dt) +SELECT generate_series('2015-01-01', '2015-04-30', '1 
day'::interval); + +INSERT INTO range_rel_test1 (dt) +SELECT generate_series('2014-12-31', '2014-12-01', '-1 day'::interval); + +/* CaMeL cAsE table names and attributes */ +CREATE TABLE "TeSt" (a INT NOT NULL, b INT); +SELECT create_hash_partitions('"TeSt"', 'a', 3); +INSERT INTO "TeSt" VALUES (1, 1); +INSERT INTO "TeSt" VALUES (2, 2); +INSERT INTO "TeSt" VALUES (3, 3); + +CREATE TABLE "RangeRel" ( + id SERIAL PRIMARY KEY, + dt TIMESTAMP NOT NULL, + txt TEXT); +INSERT INTO "RangeRel" (dt, txt) +SELECT g, md5(g::TEXT) FROM generate_series('2015-01-01', '2015-01-03', '1 day'::interval) as g; +SELECT create_range_partitions('"RangeRel"', 'dt', '2015-01-01'::DATE, '1 day'::INTERVAL); +SELECT append_range_partition('"RangeRel"'); +SELECT prepend_range_partition('"RangeRel"'); +SELECT merge_range_partitions('"RangeRel_1"', '"RangeRel_' || currval('"RangeRel_seq"') || '"'); +SELECT split_range_partition('"RangeRel_1"', '2015-01-01'::DATE); + +CREATE TABLE hash_rel_next1 ( + id SERIAL PRIMARY KEY, + value INTEGER NOT NULL); +INSERT INTO hash_rel_next1 (value) SELECT g FROM generate_series(1, 10000) as g; +SELECT create_hash_partitions('hash_rel_next1', 'value', 3); +''' + +def shell(cmd): + print(cmd) + cp = subprocess.run(cmd, shell=True) + if cp.returncode != 0: + raise subprocess.CalledProcessError(cp.returncode, cmd) + # print(subprocess.check_output(cmd, shell=True).decode("utf-8")) + +def shell_call(cmd): + print(cmd) + return subprocess.run(cmd, shell=True) + +def reinstall_pathman(tmp_pathman_path, revision): + if revision == 'worktree': + shutil.rmtree(tmp_pathman_path) + shutil.copytree(repo_dir, tmp_pathman_path) + os.chdir(tmp_pathman_path) + else: + os.chdir(tmp_pathman_path) + shell("git clean -fdx") + shell("git reset --hard") + shell("git checkout %s" % revision) + shell('make USE_PGXS=1 clean && make USE_PGXS=1 install -j4') + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description=''' + pg_pathman update checker. Testgres is used. 
Junks into /tmp/pathman_check_update. + First do some partitioned stuff on new version. Save full database dump to + dump_new.sql and pathman object definitions to pathman_objects_new.sql. + Then run old version, do the same stuff. Upgrade and make dumps. Ensure + dumps are the same. Finally, run regressions tests on upgraded version. + ''') + parser.add_argument('branches', nargs=2, + help='specify branches , e.g. "d34a77e master". Special value "worktree" means, well, working tree.') + args = parser.parse_args() + old_branch, new_branch = args.branches[0], args.branches[1] + + pathman_objs_script = os.path.join(my_dir, 'dump_pathman_objects.sql') + + data_prefix = "/tmp/pathman_check_update" + if os.path.isdir(data_prefix): + shutil.rmtree(data_prefix) + dump_new_path = os.path.join(data_prefix, 'dump_new.sql') + dump_updated_path = os.path.join(data_prefix, 'dump_updated.sql') + dump_diff_path = os.path.join(data_prefix, 'dump.diff') + pathman_objs_new_path = os.path.join(data_prefix, 'pathman_objects_new.sql') + pathman_objs_updated_path = os.path.join(data_prefix, 'pathman_objects_updated.sql') + pathman_objs_diff_path = os.path.join(data_prefix, 'pathman_objs.diff') + tmp_pathman_path = os.path.join(data_prefix, "pg_pathman") + + shutil.copytree(repo_dir, tmp_pathman_path) + + reinstall_pathman(tmp_pathman_path, new_branch) + with testgres.get_new_node('brand_new') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + node.start() + node.safe_psql('postgres', run_sql) + node.dump(dump_new_path, 'postgres') + # default user is current OS one + shell("psql -p {} -h {} -f {} -X -q -a -At > {} 2>&1".format(node.port, node.host, pathman_objs_script, pathman_objs_new_path)) + node.stop() + + # now install old version... + reinstall_pathman(tmp_pathman_path, old_branch) + with testgres.get_new_node('updated') as node: + node.init() + node.append_conf("shared_preload_libraries='pg_pathman'\n") + + node.start() + # do the same stuff... 
+ node.safe_psql('postgres', run_sql) + # and prepare regression db, see below + node.safe_psql('postgres', 'create database contrib_regression') + node.safe_psql('contrib_regression', 'create extension pg_pathman') + + # and upgrade pathman + node.stop() + reinstall_pathman(tmp_pathman_path, new_branch) + node.start() + print("Running updated db on port {}, datadir {}".format(node.port, node.base_dir)) + node.safe_psql('postgres', "alter extension pg_pathman update") + node.safe_psql('postgres', "set pg_pathman.enable = t;") + + # regression tests db, see below + node.safe_psql('contrib_regression', "alter extension pg_pathman update") + node.safe_psql('contrib_regression', "set pg_pathman.enable = t;") + + node.dump(dump_updated_path, 'postgres') + # time.sleep(432432) + # default user is current OS one + shell("psql -p {} -h {} -f {} -X -q -a -At > {} 2>&1".format(node.port, node.host, pathman_objs_script, pathman_objs_updated_path)) + + # check diffs + shell_call("diff -U3 {} {} > {} 2>&1".format(dump_updated_path, dump_new_path, dump_diff_path)) + if os.stat(dump_diff_path).st_size != 0: + msg = "DB dumps are not equal, check out the diff at {}\nProbably that's actually ok, please eyeball the diff manually and say, continue?".format(dump_diff_path) + if input("%s (y/N) " % msg).lower() != 'y': + sys.exit(1) + shell_call("diff -U3 {} {} > {} 2>&1".format(pathman_objs_updated_path, pathman_objs_new_path, pathman_objs_diff_path)) + if os.stat(pathman_objs_diff_path).st_size != 0: + print("pathman objects dumps are not equal, check out the diff at {}".format(pathman_objs_diff_path)) + # sys.exit(1) + + print("just in case, checking that dump can be restored...") + node.safe_psql('postgres', 'create database tmp') + node.restore(dump_updated_path, 'tmp') + + print("finally, run (some) pathman regression tests") + # This is a bit tricky because we want to run tests on exactly this + # installation of extension. 
It means we must create db beforehand, + # tell pg_regress not create it and discard all create/drop extension + # from tests. + # Not all tests can be thus adapted instantly, so I think that's enough + # for now. + # generated with smth like ls ~/postgres/pg_pathman/sql/ | sort | sed 's/.sql//' | xargs -n 1 printf "'%s',\n" + os.chdir(tmp_pathman_path) + REGRESS = ['pathman_array_qual', + 'pathman_bgw', + 'pathman_callbacks', + 'pathman_column_type', + 'pathman_cte', + 'pathman_domains', + 'pathman_dropped_cols', + 'pathman_expressions', + 'pathman_foreign_keys', + 'pathman_gaps', + 'pathman_inserts', + 'pathman_interval', + 'pathman_lateral', + 'pathman_only', + 'pathman_param_upd_del', + 'pathman_permissions', + 'pathman_rebuild_deletes', + 'pathman_rebuild_updates', + 'pathman_rowmarks', + 'pathman_subpartitions', + 'pathman_update_node', + 'pathman_update_triggers', + 'pathman_utility_stmt', + 'pathman_views' + ] + outfiles = os.listdir(os.path.join(tmp_pathman_path, 'expected')) + for tname in REGRESS: + shell("sed -i '/CREATE EXTENSION pg_pathman;/d' sql/{}.sql".format(tname)) + # CASCADE also removed + shell("sed -i '/DROP EXTENSION pg_pathman/d' sql/{}.sql".format(tname)) + # there might be more then one .out file + for outfile in outfiles: + if outfile.startswith(tname): + shell("sed -i '/CREATE EXTENSION pg_pathman;/d' expected/{}".format(outfile)) + shell("sed -i '/DROP EXTENSION pg_pathman/d' expected/{}".format(outfile)) + + # time.sleep(43243242) + shell("make USE_PGXS=1 PGPORT={} EXTRA_REGRESS_OPTS=--use-existing REGRESS='{}' installcheck 2>&1".format(node.port, " ".join(REGRESS))) + + node.stop() + + print("It's Twelve O'clock and All's Well.") diff --git a/tests/update/dump_pathman_objects b/tests/update/dump_pathman_objects.sql old mode 100755 new mode 100644 similarity index 68% rename from tests/update/dump_pathman_objects rename to tests/update/dump_pathman_objects.sql index fff1ed17..e1a632ca --- a/tests/update/dump_pathman_objects +++ 
b/tests/update/dump_pathman_objects.sql @@ -1,17 +1,4 @@ -#!/usr/bin/bash - - -rndstr=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 13 ; echo '') -bindir=$($PG_CONFIG --bindir) -dbname=$1 -flname=pathman_objects_$rndstr.txt - -# show file name -echo $flname - -$bindir/psql $dbname << EOF - -\o $flname +CREATE EXTENSION IF NOT EXISTS pg_pathman; SELECT pg_get_functiondef(objid) FROM pg_catalog.pg_depend JOIN pg_proc ON pg_proc.oid = pg_depend.objid @@ -27,5 +14,3 @@ ORDER BY objid::regprocedure::TEXT ASC; \d+ pathman_partition_list \d+ pathman_cache_stats \d+ pathman_concurrent_part_tasks - -EOF diff --git a/tests/update/get_sql_diff b/tests/update/get_sql_diff new file mode 100755 index 00000000..876717a8 --- /dev/null +++ b/tests/update/get_sql_diff @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +PG_VER=$1 +WORK_DIR=/tmp/pg_pathman +BRANCH_1=$2 +BRANCH_2=$3 + + +if [ -z "$PG_VER" ]; then + PG_VER=10 +fi + +if [ -z "$BRANCH_1" ]; then + BRANCH_1=master +fi + +if [ -z "$BRANCH_1" ]; then + BRANCH_2=$(git tag | sort -V | tail -1) +fi + + +printf "PG:\\t$PG_VER\\n" +printf "BRANCH_1:\\t$BRANCH_1\\n" +printf "BRANCH_2:\\t$BRANCH_2\\n" + + +cp -R "$(dirname $0)" "$WORK_DIR" + +git checkout "$BRANCH_1" + +norsu pgxs "$PG_VER" -- clean install +norsu run "$PG_VER" --pgxs --psql < "$WORK_DIR"/dump_pathman_objects.sql > "$WORK_DIR"/dump_1 + +git checkout "$BRANCH_2" + +norsu pgxs "$PG_VER" -- clean install +norsu run "$PG_VER" --pgxs --psql < "$WORK_DIR"/dump_pathman_objects.sql > "$WORK_DIR"/dump_2 + +diff -u "$WORK_DIR"/dump_1 "$WORK_DIR"/dump_2 > "$WORK_DIR"/diff diff --git a/travis/dep-ubuntu-llvm.sh b/travis/dep-ubuntu-llvm.sh deleted file mode 100755 index e640d5b5..00000000 --- a/travis/dep-ubuntu-llvm.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cat ./travis/llvm-snapshot.gpg.key | sudo apt-key add - -echo "deb http://apt.llvm.org/trusty/ llvm-toolchain-$(lsb_release -cs)-$LLVM_VER main" | sudo tee /etc/apt/sources.list.d/llvm.list diff --git 
a/travis/dep-ubuntu-postgres.sh b/travis/dep-ubuntu-postgres.sh deleted file mode 100755 index 41c7d346..00000000 --- a/travis/dep-ubuntu-postgres.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cat ./travis/postgresql.gpg.key | sudo apt-key add - -echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PG_VER" | sudo tee /etc/apt/sources.list.d/pgdg.list diff --git a/travis/llvm-snapshot.gpg.key b/travis/llvm-snapshot.gpg.key deleted file mode 100644 index aa6b105a..00000000 --- a/travis/llvm-snapshot.gpg.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.4.12 (GNU/Linux) - -mQINBFE9lCwBEADi0WUAApM/mgHJRU8lVkkw0CHsZNpqaQDNaHefD6Rw3S4LxNmM -EZaOTkhP200XZM8lVdbfUW9xSjA3oPldc1HG26NjbqqCmWpdo2fb+r7VmU2dq3NM -R18ZlKixiLDE6OUfaXWKamZsXb6ITTYmgTO6orQWYrnW6ckYHSeaAkW0wkDAryl2 -B5v8aoFnQ1rFiVEMo4NGzw4UX+MelF7rxaaregmKVTPiqCOSPJ1McC1dHFN533FY -Wh/RVLKWo6npu+owtwYFQW+zyQhKzSIMvNujFRzhIxzxR9Gn87MoLAyfgKEzrbbT -DhqqNXTxS4UMUKCQaO93TzetX/EBrRpJj+vP640yio80h4Dr5pAd7+LnKwgpTDk1 -G88bBXJAcPZnTSKu9I2c6KY4iRNbvRz4i+ZdwwZtdW4nSdl2792L7Sl7Nc44uLL/ -ZqkKDXEBF6lsX5XpABwyK89S/SbHOytXv9o4puv+65Ac5/UShspQTMSKGZgvDauU -cs8kE1U9dPOqVNCYq9Nfwinkf6RxV1k1+gwtclxQuY7UpKXP0hNAXjAiA5KS5Crq -7aaJg9q2F4bub0mNU6n7UI6vXguF2n4SEtzPRk6RP+4TiT3bZUsmr+1ktogyOJCc -Ha8G5VdL+NBIYQthOcieYCBnTeIH7D3Sp6FYQTYtVbKFzmMK+36ERreL/wARAQAB -tD1TeWx2ZXN0cmUgTGVkcnUgLSBEZWJpYW4gTExWTSBwYWNrYWdlcyA8c3lsdmVz -dHJlQGRlYmlhbi5vcmc+iQI4BBMBAgAiBQJRPZQsAhsDBgsJCAcDAgYVCAIJCgsE -FgIDAQIeAQIXgAAKCRAVz00Yr090Ibx+EADArS/hvkDF8juWMXxh17CgR0WZlHCC -9CTBWkg5a0bNN/3bb97cPQt/vIKWjQtkQpav6/5JTVCSx2riL4FHYhH0iuo4iAPR -udC7Cvg8g7bSPrKO6tenQZNvQm+tUmBHgFiMBJi92AjZ/Qn1Shg7p9ITivFxpLyX -wpmnF1OKyI2Kof2rm4BFwfSWuf8Fvh7kDMRLHv+MlnK/7j/BNpKdozXxLcwoFBmn -l0WjpAH3OFF7Pvm1LJdf1DjWKH0Dc3sc6zxtmBR/KHHg6kK4BGQNnFKujcP7TVdv -gMYv84kun14pnwjZcqOtN3UJtcx22880DOQzinoMs3Q4w4o05oIF+sSgHViFpc3W -R0v+RllnH05vKZo+LDzc83DQVrdwliV12eHxrMQ8UYg88zCbF/cHHnlzZWAJgftg 
-hB08v1BKPgYRUzwJ6VdVqXYcZWEaUJmQAPuAALyZESw94hSo28FAn0/gzEc5uOYx -K+xG/lFwgAGYNb3uGM5m0P6LVTfdg6vDwwOeTNIExVk3KVFXeSQef2ZMkhwA7wya -KJptkb62wBHFE+o9TUdtMCY6qONxMMdwioRE5BYNwAsS1PnRD2+jtlI0DzvKHt7B -MWd8hnoUKhMeZ9TNmo+8CpsAtXZcBho0zPGz/R8NlJhAWpdAZ1CmcPo83EW86Yq7 -BxQUKnNHcwj2ebkCDQRRPZQsARAA4jxYmbTHwmMjqSizlMJYNuGOpIidEdx9zQ5g -zOr431/VfWq4S+VhMDhs15j9lyml0y4ok215VRFwrAREDg6UPMr7ajLmBQGau0Fc -bvZJ90l4NjXp5p0NEE/qOb9UEHT7EGkEhaZ1ekkWFTWCgsy7rRXfZLxB6sk7pzLC -DshyW3zjIakWAnpQ5j5obiDy708pReAuGB94NSyb1HoW/xGsGgvvCw4r0w3xPStw -F1PhmScE6NTBIfLliea3pl8vhKPlCh54Hk7I8QGjo1ETlRP4Qll1ZxHJ8u25f/ta -RES2Aw8Hi7j0EVcZ6MT9JWTI83yUcnUlZPZS2HyeWcUj+8nUC8W4N8An+aNps9l/ -21inIl2TbGo3Yn1JQLnA1YCoGwC34g8QZTJhElEQBN0X29ayWW6OdFx8MDvllbBV -ymmKq2lK1U55mQTfDli7S3vfGz9Gp/oQwZ8bQpOeUkc5hbZszYwP4RX+68xDPfn+ -M9udl+qW9wu+LyePbW6HX90LmkhNkkY2ZzUPRPDHZANU5btaPXc2H7edX4y4maQa -xenqD0lGh9LGz/mps4HEZtCI5CY8o0uCMF3lT0XfXhuLksr7Pxv57yue8LLTItOJ -d9Hmzp9G97SRYYeqU+8lyNXtU2PdrLLq7QHkzrsloG78lCpQcalHGACJzrlUWVP/ -fN3Ht3kAEQEAAYkCHwQYAQIACQUCUT2ULAIbDAAKCRAVz00Yr090IbhWEADbr50X -OEXMIMGRLe+YMjeMX9NG4jxs0jZaWHc/WrGR+CCSUb9r6aPXeLo+45949uEfdSsB -pbaEdNWxF5Vr1CSjuO5siIlgDjmT655voXo67xVpEN4HhMrxugDJfCa6z97P0+ML -PdDxim57uNqkam9XIq9hKQaurxMAECDPmlEXI4QT3eu5qw5/knMzDMZj4Vi6hovL -wvvAeLHO/jsyfIdNmhBGU2RWCEZ9uo/MeerPHtRPfg74g+9PPfP6nyHD2Wes6yGd -oVQwtPNAQD6Cj7EaA2xdZYLJ7/jW6yiPu98FFWP74FN2dlyEA2uVziLsfBrgpS4l -tVOlrO2YzkkqUGrybzbLpj6eeHx+Cd7wcjI8CalsqtL6cG8cUEjtWQUHyTbQWAgG -5VPEgIAVhJ6RTZ26i/G+4J8neKyRs4vz+57UGwY6zI4AB1ZcWGEE3Bf+CDEDgmnP -LSwbnHefK9IljT9XU98PelSryUO/5UPw7leE0akXKB4DtekToO226px1VnGp3Bov -1GBGvpHvL2WizEwdk+nfk8LtrLzej+9FtIcq3uIrYnsac47Pf7p0otcFeTJTjSq3 -krCaoG4Hx0zGQG2ZFpHrSrZTVy6lxvIdfi0beMgY6h78p6M9eYZHQHc02DjFkQXN -bXb5c6gCHESH5PXwPU4jQEE7Ib9J6sbk7ZT2Mw== -=j+4q ------END PGP PUBLIC KEY BLOCK----- diff --git a/travis/pg-travis-test.sh b/travis/pg-travis-test.sh deleted file mode 100755 index 5c0ec44e..00000000 --- a/travis/pg-travis-test.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/bash - -set 
-eux - -sudo apt-get update - - -# required packages -apt_packages="postgresql-$PG_VER postgresql-server-dev-$PG_VER postgresql-common python-pip python-dev build-essential" -pip_packages="testgres" - -# exit code -status=0 - -# pg_config path -pg_ctl_path=/usr/lib/postgresql/$PG_VER/bin/pg_ctl -initdb_path=/usr/lib/postgresql/$PG_VER/bin/initdb -config_path=/usr/lib/postgresql/$PG_VER/bin/pg_config - - -# bug: http://www.postgresql.org/message-id/20130508192711.GA9243@msgid.df7cb.de -sudo update-alternatives --remove-all postmaster.1.gz - -# stop all existing instances (because of https://github.com/travis-ci/travis-cookbooks/pull/221) -sudo service postgresql stop -# ... and make sure they don't come back -echo 'exit 0' | sudo tee /etc/init.d/postgresql -sudo chmod a+x /etc/init.d/postgresql - -# install required packages -sudo apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install -qq $apt_packages - - -# perform code analysis if necessary -if [ $CHECK_CODE = "true" ]; then - - if [ "$CC" = "clang" ]; then - sudo apt-get -y install -qq clang-$LLVM_VER - - scan-build-$LLVM_VER --status-bugs make USE_PGXS=1 PG_CONFIG=$config_path || status=$? 
- exit $status - - elif [ "$CC" = "gcc" ]; then - sudo apt-get -y install -qq cppcheck - - cppcheck --template "{file} ({line}): {severity} ({id}): {message}" \ - --enable=warning,portability,performance \ - --suppress=redundantAssignment \ - --suppress=uselessAssignmentPtrArg \ - --suppress=incorrectStringBooleanError \ - --std=c89 src/*.c src/*.h 2> cppcheck.log - - if [ -s cppcheck.log ]; then - cat cppcheck.log - status=1 # error - fi - - exit $status - fi - - # don't forget to "make clean" - make clean USE_PGXS=1 PG_CONFIG=$config_path -fi - - -# create cluster 'test' -CLUSTER_PATH=$(pwd)/test_cluster -$initdb_path -D $CLUSTER_PATH -U $USER -A trust - -# build pg_pathman (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path CFLAGS_SL="$($config_path --cflags_sl) -coverage" -sudo make install USE_PGXS=1 PG_CONFIG=$config_path - -# check build -status=$? -if [ $status -ne 0 ]; then exit $status; fi - -# set permission to write postgres locks -sudo chown $USER /var/run/postgresql/ - -# add pg_pathman to shared_preload_libraries and restart cluster 'test' -echo "shared_preload_libraries = 'pg_pathman'" >> $CLUSTER_PATH/postgresql.conf -echo "port = 55435" >> $CLUSTER_PATH/postgresql.conf -$pg_ctl_path -D $CLUSTER_PATH start -l postgres.log -w - -# run regression tests -PGPORT=55435 PGUSER=$USER PG_CONFIG=$config_path make installcheck USE_PGXS=1 || status=$? - -# show diff if it exists -if test -f regression.diffs; then cat regression.diffs; fi - - -set +u - -# create virtual environment and activate it -virtualenv /tmp/envs/pg_pathman -source /tmp/envs/pg_pathman/bin/activate - -# install pip packages -pip3 install $pip_packages - -# run python tests -make USE_PGXS=1 PG_CONFIG=$config_path python_tests || status=$? 
- -# deactivate virtual environment -deactivate - -set -u - - -# install cmake for cmocka -sudo apt-get -y install -qq cmake - -# build & install cmocka -CMOCKA_VER=1.1.1 -cd tests/cmocka -tar xf cmocka-$CMOCKA_VER.tar.xz -cd cmocka-$CMOCKA_VER -mkdir build && cd build -cmake .. -make && sudo make install -cd ../../../.. - -# export path to libcmocka.so -LD_LIBRARY_PATH=/usr/local/lib -export LD_LIBRARY_PATH - -# run cmocka tests (using CFLAGS_SL for gcov) -make USE_PGXS=1 PG_CONFIG=$config_path PG_CPPFLAGS="-coverage" cmocka_tests || status=$? - -# remove useless gcov files -rm -f tests/cmocka/*.gcno -rm -f tests/cmocka/*.gcda - -#generate *.gcov files -gcov src/*.c src/compat/*.c src/include/*.h src/include/compat/*.h - - -exit $status diff --git a/travis/postgresql.gpg.key b/travis/postgresql.gpg.key deleted file mode 100644 index 8480576e..00000000 --- a/travis/postgresql.gpg.key +++ /dev/null @@ -1,77 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBE6XR8IBEACVdDKT2HEH1IyHzXkb4nIWAY7echjRxo7MTcj4vbXAyBKOfjja -UrBEJWHN6fjKJXOYWXHLIYg0hOGeW9qcSiaa1/rYIbOzjfGfhE4x0Y+NJHS1db0V -G6GUj3qXaeyqIJGS2z7m0Thy4Lgr/LpZlZ78Nf1fliSzBlMo1sV7PpP/7zUO+aA4 -bKa8Rio3weMXQOZgclzgeSdqtwKnyKTQdXY5MkH1QXyFIk1nTfWwyqpJjHlgtwMi -c2cxjqG5nnV9rIYlTTjYG6RBglq0SmzF/raBnF4Lwjxq4qRqvRllBXdFu5+2pMfC -IZ10HPRdqDCTN60DUix+BTzBUT30NzaLhZbOMT5RvQtvTVgWpeIn20i2NrPWNCUh -hj490dKDLpK/v+A5/i8zPvN4c6MkDHi1FZfaoz3863dylUBR3Ip26oM0hHXf4/2U -A/oA4pCl2W0hc4aNtozjKHkVjRx5Q8/hVYu+39csFWxo6YSB/KgIEw+0W8DiTII3 -RQj/OlD68ZDmGLyQPiJvaEtY9fDrcSpI0Esm0i4sjkNbuuh0Cvwwwqo5EF1zfkVj -Tqz2REYQGMJGc5LUbIpk5sMHo1HWV038TWxlDRwtOdzw08zQA6BeWe9FOokRPeR2 -AqhyaJJwOZJodKZ76S+LDwFkTLzEKnYPCzkoRwLrEdNt1M7wQBThnC5z6wARAQAB -tBxQb3N0Z3JlU1FMIERlYmlhbiBSZXBvc2l0b3J5iQJOBBMBCAA4AhsDBQsJCAcD -BRUKCQgLBRYCAwEAAh4BAheAFiEEuXsK/KoaR/BE8kSgf8x9RqzMTPgFAlhtCD8A -CgkQf8x9RqzMTPgECxAAk8uL+dwveTv6eH21tIHcltt8U3Ofajdo+D/ayO53LiYO -xi27kdHD0zvFMUWXLGxQtWyeqqDRvDagfWglHucIcaLxoxNwL8+e+9hVFIEskQAY 
-kVToBCKMXTQDLarz8/J030Pmcv3ihbwB+jhnykMuyyNmht4kq0CNgnlcMCdVz0d3 -z/09puryIHJrD+A8y3TD4RM74snQuwc9u5bsckvRtRJKbP3GX5JaFZAqUyZNRJRJ -Tn2OQRBhCpxhlZ2afkAPFIq2aVnEt/Ie6tmeRCzsW3lOxEH2K7MQSfSu/kRz7ELf -Cz3NJHj7rMzC+76Rhsas60t9CjmvMuGONEpctijDWONLCuch3Pdj6XpC+MVxpgBy -2VUdkunb48YhXNW0jgFGM/BFRj+dMQOUbY8PjJjsmVV0joDruWATQG/M4C7O8iU0 -B7o6yVv4m8LDEN9CiR6r7H17m4xZseT3f+0QpMe7iQjz6XxTUFRQxXqzmNnloA1T -7VjwPqIIzkj/u0V8nICG/ktLzp1OsCFatWXh7LbU+hwYl6gsFH/mFDqVxJ3+DKQi -vyf1NatzEwl62foVjGUSpvh3ymtmtUQ4JUkNDsXiRBWczaiGSuzD9Qi0ONdkAX3b -ewqmN4TfE+XIpCPxxHXwGq9Rv1IFjOdCX0iG436GHyTLC1tTUIKF5xV4Y0+cXIOI -RgQQEQgABgUCTpdI7gAKCRDFr3dKWFELWqaPAKD1TtT5c3sZz92Fj97KYmqbNQZP -+ACfSC6+hfvlj4GxmUjp1aepoVTo3weJAhwEEAEIAAYFAk6XSQsACgkQTFprqxLS -p64F8Q//cCcutwrH50UoRFejg0EIZav6LUKejC6kpLeubbEtuaIH3r2zMblPGc4i -+eMQKo/PqyQrceRXeNNlqO6/exHozYi2meudxa6IudhwJIOn1MQykJbNMSC2sGUp -1W5M1N5EYgt4hy+qhlfnD66LR4G+9t5FscTJSy84SdiOuqgCOpQmPkVRm1HX5X1+ -dmnzMOCk5LHHQuiacV0qeGO7JcBCVEIDr+uhU1H2u5GPFNHm5u15n25tOxVivb94 -xg6NDjouECBH7cCVuW79YcExH/0X3/9G45rjdHlKPH1OIUJiiX47OTxdG3dAbB4Q -fnViRJhjehFscFvYWSqXo3pgWqUsEvv9qJac2ZEMSz9x2mj0ekWxuM6/hGWxJdB+ -+985rIelPmc7VRAXOjIxWknrXnPCZAMlPlDLu6+vZ5BhFX0Be3y38f7GNCxFkJzl -hWZ4Cj3WojMj+0DaC1eKTj3rJ7OJlt9S9xnO7OOPEUTGyzgNIDAyCiu8F4huLPaT -ape6RupxOMHZeoCVlqx3ouWctelB2oNXcxxiQ/8y+21aHfD4n/CiIFwDvIQjl7dg -mT3u5Lr6yxuosR3QJx1P6rP5ZrDTP9khT30t+HZCbvs5Pq+v/9m6XDmi+NlU7Zuh -Ehy97tL3uBDgoL4b/5BpFL5U9nruPlQzGq1P9jj40dxAaDAX/WKJAj0EEwEIACcC -GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlB5KywFCQPDFt8ACgkQf8x9RqzM -TPhuCQ//QAjRSAOCQ02qmUAikT+mTB6baOAakkYq6uHbEO7qPZkv4E/M+HPIJ4wd -nBNeSQjfvdNcZBA/x0hr5EMcBneKKPDj4hJ0panOIRQmNSTThQw9OU351gm3YQct -AMPRUu1fTJAL/AuZUQf9ESmhyVtWNlH/56HBfYjE4iVeaRkkNLJyX3vkWdJSMwC/ -LO3Lw/0M3R8itDsm74F8w4xOdSQ52nSRFRh7PunFtREl+QzQ3EA/WB4AIj3VohIG -kWDfPFCzV3cyZQiEnjAe9gG5pHsXHUWQsDFZ12t784JgkGyO5wT26pzTiuApWM3k -/9V+o3HJSgH5hn7wuTi3TelEFwP1fNzI5iUUtZdtxbFOfWMnZAypEhaLmXNkg4zD -kH44r0ss9fR0DAgUav1a25UnbOn4PgIEQy2fgHKHwRpCy20d6oCSlmgyWsR40EPP 
-YvtGq49A2aK6ibXmdvvFT+Ts8Z+q2SkFpoYFX20mR2nsF0fbt1lfH65P64dukxeR -GteWIeNakDD40bAAOH8+OaoTGVBJ2ACJfLVNM53PEoftavAwUYMrR910qvwYfd/4 -6rh46g1Frr9SFMKYE9uvIJIgDsQB3QBp71houU4H55M5GD8XURYs+bfiQpJG1p7e -B8e5jZx1SagNWc4XwL2FzQ9svrkbg1Y+359buUiP7T6QXX2zY++JAj0EEwEIACcC -GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlEqbZUFCQg2wEEACgkQf8x9RqzM -TPhFMQ//WxAfKMdpSIA9oIC/yPD/dJpY/+DyouOljpE6MucMy/ArBECjFTBwi/j9 -NYM4ynAk34IkhuNexc1i9/05f5RM6+riLCLgAOsADDbHD4miZzoSxiVr6GQ3YXMb -OGld9kV9Sy6mGNjcUov7iFcf5Hy5w3AjPfKuR9zXswyfzIU1YXObiiZT38l55pp/ -BSgvGVQsvbNjsff5CbEKXS7q3xW+WzN0QWF6YsfNVhFjRGj8hKtHvwKcA02wwjLe -LXVTm6915ZUKhZXUFc0vM4Pj4EgNswH8Ojw9AJaKWJIZmLyW+aP+wpu6YwVCicxB -Y59CzBO2pPJDfKFQzUtrErk9irXeuCCLesDyirxJhv8o0JAvmnMAKOLhNFUrSQ2m -+3EnF7zhfz70gHW+EG8X8mL/EN3/dUM09j6TVrjtw43RLxBzwMDeariFF9yC+5bL -tnGgxjsB9Ik6GV5v34/NEEGf1qBiAzFmDVFRZlrNDkq6gmpvGnA5hUWNr+y0i01L -jGyaLSWHYjgw2UEQOqcUtTFK9MNzbZze4mVaHMEz9/aMfX25R6qbiNqCChveIm8m -Yr5Ds2zdZx+G5bAKdzX7nx2IUAxFQJEE94VLSp3npAaTWv3sHr7dR8tSyUJ9poDw -gw4W9BIcnAM7zvFYbLF5FNggg/26njHCCN70sHt8zGxKQINMc6SJAj0EEwEIACcC -GwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AFAlLpFRkFCQ6EJy0ACgkQf8x9RqzM -TPjOZA//Zp0e25pcvle7cLc0YuFr9pBv2JIkLzPm83nkcwKmxaWayUIG4Sv6pH6h -m8+S/CHQij/yFCX+o3ngMw2J9HBUvafZ4bnbI0RGJ70GsAwraQ0VlkIfg7GUw3Tz -voGYO42rZTru9S0K/6nFP6D1HUu+U+AsJONLeb6oypQgInfXQExPZyliUnHdipei -4WR1YFW6sjSkZT/5C3J1wkAvPl5lvOVthI9Zs6bZlJLZwusKxU0UM4Btgu1Sf3nn -JcHmzisixwS9PMHE+AgPWIGSec/N27a0KmTTvImV6K6nEjXJey0K2+EYJuIBsYUN -orOGBwDFIhfRk9qGlpgt0KRyguV+AP5qvgry95IrYtrOuE7307SidEbSnvO5ezNe -mE7gT9Z1tM7IMPfmoKph4BfpNoH7aXiQh1Wo+ChdP92hZUtQrY2Nm13cmkxYjQ4Z -gMWfYMC+DA/GooSgZM5i6hYqyyfAuUD9kwRN6BqTbuAUAp+hCWYeN4D88sLYpFh3 -paDYNKJ+Gf7Yyi6gThcV956RUFDH3ys5Dk0vDL9NiWwdebWfRFbzoRM3dyGP889a -OyLzS3mh6nHzZrNGhW73kslSQek8tjKrB+56hXOnb4HaElTZGDvD5wmrrhN94kby -Gtz3cydIohvNO9d90+29h0eGEDYti7j7maHkBKUAwlcPvMg5m3Y= -=DA1T ------END PGP PUBLIC KEY BLOCK-----